repo_id | file_path | content | __index_level_0__
---|---|---|---|
rapidsai_public_repos/rapids-triton/cpp/test/triton | rapidsai_public_repos/rapids-triton/cpp/test/triton/api/model_finalize.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rapids_triton/triton/api/model_finalize.hpp>
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test/triton | rapidsai_public_repos/rapids-triton/cpp/test/triton/api/model_initialize.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rapids_triton/triton/api/model_initialize.hpp>
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test/triton | rapidsai_public_repos/rapids-triton/cpp/test/triton/api/execute.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rapids_triton/triton/api/execute.hpp>
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test/triton | rapidsai_public_repos/rapids-triton/cpp/test/triton/api/instance_initialize.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rapids_triton/triton/api/instance_initialize.hpp>
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test | rapidsai_public_repos/rapids-triton/cpp/test/batch/batch.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rapids_triton/batch/batch.hpp>
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test | rapidsai_public_repos/rapids-triton/cpp/test/memory/buffer.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef TRITON_ENABLE_GPU
#include <cuda_runtime_api.h>
#endif
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <cstring>
#include <rapids_triton/build_control.hpp>
#include <rapids_triton/exceptions.hpp>
#include <rapids_triton/memory/buffer.hpp>
#include <rapids_triton/memory/types.hpp>
#include <vector>
namespace triton {
namespace backend {
namespace rapids {
TEST(RapidsTriton, default_buffer)
{
auto buffer = Buffer<int>();
EXPECT_EQ(buffer.mem_type(), HostMemory);
EXPECT_EQ(buffer.size(), 0);
EXPECT_EQ(buffer.data(), nullptr);
EXPECT_EQ(buffer.device(), 0);
EXPECT_EQ(buffer.stream(), cudaStream_t{});
#ifdef TRITON_ENABLE_GPU
auto stream = cudaStream_t{};
cudaStreamCreate(&stream);
buffer.set_stream(stream);
EXPECT_EQ(buffer.stream(), stream);
cudaStreamDestroy(stream);
#endif
}
TEST(RapidsTriton, device_buffer)
{
auto data = std::vector<int>{1, 2, 3};
#ifdef TRITON_ENABLE_GPU
auto buffer = Buffer<int>(data.size(), DeviceMemory, 0, 0);
ASSERT_EQ(buffer.mem_type(), DeviceMemory);
ASSERT_EQ(buffer.size(), data.size());
ASSERT_NE(buffer.data(), nullptr);
auto data_out = std::vector<int>(data.size());
cudaMemcpy(static_cast<void*>(buffer.data()),
static_cast<void*>(data.data()),
sizeof(int) * data.size(),
cudaMemcpyHostToDevice);
cudaMemcpy(static_cast<void*>(data_out.data()),
static_cast<void*>(buffer.data()),
sizeof(int) * data.size(),
cudaMemcpyDeviceToHost);
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
#else
EXPECT_THROW(Buffer<int>(data.size(), DeviceMemory, 0, 0), TritonException);
#endif
}
TEST(RapidsTriton, non_owning_device_buffer)
{
auto data = std::vector<int>{1, 2, 3};
#ifdef TRITON_ENABLE_GPU
auto* ptr_d = static_cast<int*>(nullptr);
cudaMalloc(reinterpret_cast<void**>(&ptr_d), sizeof(int) * data.size());
cudaMemcpy(static_cast<void*>(ptr_d),
static_cast<void*>(data.data()),
sizeof(int) * data.size(),
cudaMemcpyHostToDevice);
auto buffer = Buffer<int>(ptr_d, data.size(), DeviceMemory);
ASSERT_EQ(buffer.mem_type(), DeviceMemory);
ASSERT_EQ(buffer.size(), data.size());
ASSERT_EQ(buffer.data(), ptr_d);
auto data_out = std::vector<int>(data.size());
cudaMemcpy(static_cast<void*>(data_out.data()),
static_cast<void*>(buffer.data()),
sizeof(int) * data.size(),
cudaMemcpyDeviceToHost);
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
cudaFree(reinterpret_cast<void*>(ptr_d));
#else
ASSERT_THROW(Buffer<int>(data.data(), data.size(), DeviceMemory), TritonException);
#endif
}
TEST(RapidsTriton, host_buffer)
{
auto data = std::vector<int>{1, 2, 3};
auto buffer = Buffer<int>(data.size(), HostMemory, 0, 0);
ASSERT_EQ(buffer.mem_type(), HostMemory);
ASSERT_EQ(buffer.size(), data.size());
ASSERT_NE(buffer.data(), nullptr);
std::memcpy(
static_cast<void*>(buffer.data()), static_cast<void*>(data.data()), data.size() * sizeof(int));
auto data_out = std::vector<int>(buffer.data(), buffer.data() + buffer.size());
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
}
TEST(RapidsTriton, non_owning_host_buffer)
{
auto data = std::vector<int>{1, 2, 3};
auto buffer = Buffer<int>(data.data(), data.size(), HostMemory);
ASSERT_EQ(buffer.mem_type(), HostMemory);
ASSERT_EQ(buffer.size(), data.size());
ASSERT_EQ(buffer.data(), data.data());
auto data_out = std::vector<int>(buffer.data(), buffer.data() + buffer.size());
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
}
TEST(RapidsTriton, copy_buffer)
{
auto data = std::vector<int>{1, 2, 3};
auto orig_buffer = Buffer<int>(data.data(), data.size(), HostMemory);
auto buffer = Buffer<int>(orig_buffer);
ASSERT_EQ(buffer.mem_type(), HostMemory);
ASSERT_EQ(buffer.size(), data.size());
ASSERT_NE(buffer.data(), orig_buffer.data());
auto data_out = std::vector<int>(buffer.data(), buffer.data() + buffer.size());
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
}
TEST(RapidsTriton, move_buffer)
{
auto data = std::vector<int>{1, 2, 3};
auto buffer = Buffer<int>(Buffer<int>(data.data(), data.size(), HostMemory));
ASSERT_EQ(buffer.mem_type(), HostMemory);
ASSERT_EQ(buffer.size(), data.size());
ASSERT_EQ(buffer.data(), data.data());
auto data_out = std::vector<int>(buffer.data(), buffer.data() + buffer.size());
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
}
TEST(RapidsTriton, move_assignment_buffer)
{
auto data = std::vector<int>{1, 2, 3};
#ifdef TRITON_ENABLE_GPU
auto buffer = Buffer<int>{data.data(), data.size() - 1, DeviceMemory};
#else
auto buffer = Buffer<int>{data.data(), data.size() - 1, HostMemory};
#endif
buffer = Buffer<int>{data.size(), HostMemory};
ASSERT_EQ(buffer.mem_type(), HostMemory);
ASSERT_EQ(buffer.size(), data.size());
}
} // namespace rapids
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test | rapidsai_public_repos/rapids-triton/cpp/test/memory/types.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rapids_triton/memory/types.hpp>
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test | rapidsai_public_repos/rapids-triton/cpp/test/memory/resource.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef TRITON_ENABLE_GPU
#include <cuda_runtime_api.h>
#include <rmm/cuda_device.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#endif
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <rapids_triton/build_control.hpp>
#include <rapids_triton/exceptions.hpp>
#include <rapids_triton/memory/resource.hpp>
namespace triton {
namespace backend {
namespace rapids {
TEST(RapidsTriton, set_memory_resource)
{
#ifdef TRITON_ENABLE_GPU
auto device_id = int{};
cuda_check(cudaGetDevice(&device_id));
EXPECT_EQ(rmm::mr::get_current_device_resource()->is_equal(rmm::mr::cuda_memory_resource{}),
true);
setup_memory_resource(device_id);
EXPECT_EQ(rmm::mr::get_current_device_resource()->is_equal(rmm::mr::cuda_memory_resource{}),
false);
#else
setup_memory_resource(0);
#endif
}
} // namespace rapids
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test/memory | rapidsai_public_repos/rapids-triton/cpp/test/memory/detail/copy.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef TRITON_ENABLE_GPU
#include <cuda_runtime_api.h>
#include <rapids_triton/memory/detail/gpu_only/copy.hpp>
#else
#include <rapids_triton/memory/detail/cpu_only/copy.hpp>
#endif
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <rapids_triton/build_control.hpp>
#include <rapids_triton/memory/types.hpp>
#include <vector>
namespace triton {
namespace backend {
namespace rapids {
TEST(RapidsTriton, copy)
{
auto data = std::vector<int>{1, 2, 3};
auto data_out = std::vector<int>(data.size());
detail::copy(data_out.data(), data.data(), data.size(), 0, HostMemory, HostMemory);
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
data_out = std::vector<int>(data.size());
#ifdef TRITON_ENABLE_GPU
auto* ptr_d = static_cast<int*>(nullptr);
cudaMalloc(reinterpret_cast<void**>(&ptr_d), sizeof(int) * data.size());
detail::copy(ptr_d, data.data(), data.size(), 0, DeviceMemory, HostMemory);
cudaMemcpy(static_cast<void*>(data_out.data()),
static_cast<void*>(ptr_d),
sizeof(int) * data.size(),
cudaMemcpyDeviceToHost);
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
cudaFree(reinterpret_cast<void*>(ptr_d));
#else
EXPECT_THROW(detail::copy(data_out.data(), data.data(), data.size(), 0, HostMemory, DeviceMemory),
TritonException);
EXPECT_THROW(detail::copy(data_out.data(), data.data(), data.size(), 0, DeviceMemory, HostMemory),
TritonException);
#endif
}
} // namespace rapids
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test/memory | rapidsai_public_repos/rapids-triton/cpp/test/memory/detail/owned_device_buffer.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef TRITON_ENABLE_GPU
#include <cuda_runtime_api.h>
#include <rapids_triton/memory/detail/gpu_only/owned_device_buffer.hpp>
#else
#include <rapids_triton/memory/detail/cpu_only/owned_device_buffer.hpp>
#endif
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <rapids_triton/build_control.hpp>
#include <rapids_triton/exceptions.hpp>
#include <vector>
namespace triton {
namespace backend {
namespace rapids {
TEST(RapidsTriton, owned_device_buffer)
{
auto data = std::vector<int>{1, 2, 3};
#ifdef TRITON_ENABLE_GPU
auto device_id = 0;
cudaGetDevice(&device_id);
auto stream = cudaStream_t{};
cudaStreamCreate(&stream);
auto buffer = detail::owned_device_buffer<int, IS_GPU_BUILD>(device_id, data.size(), stream);
auto data_out = std::vector<int>(data.size());
cudaMemcpy(static_cast<void*>(buffer.get()),
static_cast<void*>(data.data()),
sizeof(int) * data.size(),
cudaMemcpyHostToDevice);
cudaMemcpy(static_cast<void*>(data_out.data()),
static_cast<void*>(buffer.get()),
sizeof(int) * data.size(),
cudaMemcpyDeviceToHost);
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
cudaStreamDestroy(stream);
#else
// Workaround for ungraceful handling of multiple template parameters in
// EXPECT_THROW
using dev_buffer = detail::owned_device_buffer<int, IS_GPU_BUILD>;
EXPECT_THROW(dev_buffer(0, data.size(), 0), TritonException);
#endif
}
} // namespace rapids
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test | rapidsai_public_repos/rapids-triton/cpp/test/tensor/dtype.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <rapids_triton/tensor/dtype.hpp>
namespace triton {
namespace backend {
namespace rapids {
template <DType D>
void check_dtype_conversion()
{
EXPECT_EQ(D, TritonDtype<typename TritonType<D>::type>::value);
EXPECT_EQ(D, TritonDtype<typename TritonType<D>::type const>::value);
}
TEST(RapidsTriton, dtype)
{
check_dtype_conversion<DTypeBool>();
check_dtype_conversion<DTypeUint8>();
check_dtype_conversion<DTypeChar>();
check_dtype_conversion<DTypeByte>();
check_dtype_conversion<DTypeUint16>();
check_dtype_conversion<DTypeUint32>();
check_dtype_conversion<DTypeUint64>();
check_dtype_conversion<DTypeInt8>();
check_dtype_conversion<DTypeInt16>();
check_dtype_conversion<DTypeInt32>();
check_dtype_conversion<DTypeInt64>();
check_dtype_conversion<DTypeFloat32>();
check_dtype_conversion<DTypeFloat64>();
}
} // namespace rapids
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids-triton/cpp/test | rapidsai_public_repos/rapids-triton/cpp/test/tensor/tensor.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef TRITON_ENABLE_GPU
#include <cuda_runtime_api.h>
#else
#include <rapids_triton/cpu_only/cuda_runtime_replacement.hpp>
#endif
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <rapids_triton/tensor/dtype.hpp>
#include <rapids_triton/tensor/tensor.hpp>
#include <vector>
namespace triton {
namespace backend {
namespace rapids {
TEST(RapidsTriton, default_tensor)
{
auto tensor = Tensor<int>();
EXPECT_EQ(tensor.buffer().size(), 0);
EXPECT_EQ(tensor.shape().size(), 0);
}
TEST(RapidsTriton, move_buffer_tensor)
{
auto shape = std::vector<std::size_t>{2, 2};
auto data = std::vector<int>{1, 2, 3, 4};
auto tensor = Tensor<int>(shape, Buffer<int>{data.data(), data.size(), HostMemory});
EXPECT_EQ(data.data(), tensor.data());
EXPECT_EQ(data.size(), tensor.size());
EXPECT_THAT(tensor.shape(), ::testing::ElementsAreArray(shape));
EXPECT_EQ(tensor.dtype(), DTypeInt32);
EXPECT_EQ(tensor.mem_type(), HostMemory);
EXPECT_EQ(tensor.stream(), cudaStream_t{});
EXPECT_EQ(tensor.device(), 0);
auto data_out = std::vector<int>(tensor.data(), tensor.data() + tensor.size());
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
}
TEST(RapidsTriton, multi_buffer_tensor)
{
auto shape = std::vector<std::size_t>{2, 2};
auto data = std::vector<int>{1, 2, 3, 4};
auto all_buffers = std::vector<Buffer<int>>{};
all_buffers.reserve(data.size());
auto mem_type = HostMemory;
if constexpr (IS_GPU_BUILD) { mem_type = DeviceMemory; }
std::transform(data.begin(), data.end(), std::back_inserter(all_buffers), [mem_type](auto& elem) {
return Buffer<int>{&elem, 1, mem_type};
});
auto tensor =
Tensor<int>(shape, all_buffers.begin(), all_buffers.end(), mem_type, 0, cudaStream_t{});
auto data_out = std::vector<int>(data.size());
#ifdef TRITON_ENABLE_GPU
cudaMemcpy(static_cast<void*>(data_out.data()),
static_cast<void*>(tensor.data()),
sizeof(int) * tensor.size(),
cudaMemcpyDeviceToHost);
#else
std::memcpy(static_cast<void*>(data_out.data()),
static_cast<void*>(tensor.data()),
sizeof(int) * tensor.size());
#endif
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
}
TEST(RapidsTriton, tensor_copy)
{
auto shape = std::vector<std::size_t>{2, 2};
auto data = std::vector<int>{1, 2, 3, 4};
auto data1 = data;
auto tensor1 = Tensor<int>(shape, Buffer<int>{data.data(), data.size(), HostMemory});
auto data2 = std::vector<int>(data1.size());
auto tensor2 = Tensor<int>(shape, Buffer<int>{data2.data(), data2.size(), HostMemory});
rapids::copy(tensor2, tensor1);
auto data_out = std::vector<int>(tensor2.data(), tensor2.data() + tensor2.size());
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
auto small_shape = std::vector<std::size_t>{2};
auto small_data = std::vector<int>(2);
auto tensor3 =
Tensor<int>(small_shape, Buffer<int>{small_data.data(), small_data.size(), HostMemory});
EXPECT_THROW(rapids::copy(tensor3, tensor1), TritonException);
}
TEST(RapidsTriton, tensor_multi_copy)
{
auto shape = std::vector<std::size_t>{2, 2};
auto data = std::vector<int>{1, 2, 3, 4};
auto data1 = data;
auto tensor1 = Tensor<int>(shape, Buffer<int>{data.data(), data.size(), HostMemory});
auto receiver_shape = std::vector<std::size_t>{1};
auto receivers = std::vector<Tensor<int>>{};
receivers.reserve(data.size());
std::transform(
data.begin(), data.end(), std::back_inserter(receivers), [&receiver_shape](auto& val) {
return Tensor<int>(receiver_shape, Buffer<int>{std::size_t{1}, HostMemory});
});
rapids::copy(receivers.begin(), receivers.end(), tensor1);
auto data_out = std::vector<int>{};
data_out.reserve(receivers.size());
std::transform(
receivers.begin(), receivers.end(), std::back_inserter(data_out), [](auto& tensor) {
return *tensor.data();
});
EXPECT_THAT(data_out, ::testing::ElementsAreArray(data));
// Throw if trying to copy to too many outputs
receivers.emplace_back(receiver_shape, Buffer<int>{std::size_t{1}, HostMemory});
EXPECT_THROW(rapids::copy(receivers.begin(), receivers.end(), tensor1), TritonException);
}
} // namespace rapids
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids-triton | rapidsai_public_repos/rapids-triton/docs/usage.md | <!--
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Using RAPIDS-Triton
## Getting Started
To begin developing a custom backend with RAPIDS-Triton, we strongly recommend
that you take advantage of the [rapids-triton-template
repo](https://github.com/rapidsai/rapids-triton-template), which provides a
basic template for your backend code. If this is your first time developing a
backend with RAPIDS-Triton, the easiest way to get started is to follow the
[Linear Example](https://github.com/rapidsai/rapids-triton-linear-example).
This provides a detailed walkthrough of every step in the process of creating a
backend with example code. The rest of these usage docs will provide general
information on specific features you are likely to take advantage of when
building your backend.
## Logging
To provide logging messages in your backend, RAPIDS-Triton provides `log_info`,
`log_warn`, `log_error`, and `log_debug`. During default Triton execution, all
logging messages up to (but not including) debug level will be visible. These
functions can be invoked in two ways and can optionally include file and line
information. To add a logging message to your code, use one of the following
invocations:
```cpp
#include <rapids_triton/triton/logging.hpp>
void logging_example() {
rapids::log_info() << "This is a log message.";
rapids::log_info("This is an equivalent invocation.");
rapids::log_info(__FILE__, __LINE__) << "This one has file and line info.";
rapids::log_info(__FILE__, __LINE__, "And so does this one.");
}
```
## Error Handling
If you encounter an error condition at any point in your backend which cannot
be otherwise handled, you should throw a `TritonException`. In most cases, this
error will be gracefully handled and passed to the Triton server in a way that
will not interfere with execution of other backends, models, or requests.
`TritonException` objects are constructed with an error type and a message
indicating what went wrong, as shown below:
```cpp
#include <rapids_triton/exceptions.hpp>
void error_example() {
throw rapids::TritonException(rapids::Error::Internal, "Something bad happened!");
}
```
Available error types are:
* `Internal`: The most common error type. Used when an unexpected condition
arises which is not the result of bad user input (e.g. a CUDA error).
* `NotFound`: An error type returned when a named resource (e.g. named CUDA IPC
memory block) cannot be found.
* `InvalidArg`: An error type returned when the user has provided invalid input
in a configuration file or request.
* `Unavailable`: An error returned when a resource exists but is currently
unavailable.
* `Unsupported`: An error which indicates that a requested functionality is not
implemented by this backend (e.g. GPU execution for a CPU-only backend).
* `AlreadyExists`: An error which indicates that a resource which is being
created has already been created.
* `Unknown`: The type of the error cannot be established. This type should be
avoided wherever possible.
The `cuda_check` function is provided to help facilitate error handling of
direct invocations of the CUDA API. If such an invocation fails, `cuda_check`
will throw an appropriate `TritonException`:
```cpp
#include <rapids_triton/exceptions.hpp>
void cuda_check_example() {
rapids::cuda_check(cudaSetDevice(0));
}
```
If a `TritonException` is thrown while a backend is being loaded, Triton's
server logs will indicate the failure and include the error message. If a
`TritonException` is thrown while a model is being loaded, Triton's server logs
will display the error message in the loading logs for that model. If a
`TritonException` is thrown during handling of a request, the client will
receive an indication that the request failed along with the error message, but
the model can continue to process other requests.
## CPU-Only Builds
Most Triton backends support builds intended for CPU-only
execution. While this is not required, RAPIDS-Triton includes a compile-time
constant which can be useful for facilitating this:
```cpp
#include <rapids_triton/build_control.hpp>
void do_a_gpu_thing() {
if constexpr (rapids::IS_GPU_BUILD) {
rapids::log_info("Executing on GPU...");
} else {
rapids::log_error("Can't do that! This is a CPU-only build.");
}
}
```
You can also make use of the preprocessor identifier `TRITON_ENABLE_GPU` for
conditional inclusion of headers:
```cpp
#ifdef TRITON_ENABLE_GPU
#include <gpu_stuff.h>
#endif
```
Sometimes, having a CUDA symbol available in a CPU-only build can avoid layers
of indirection which would otherwise be required to allow for compilation of
both GPU and CPU versions of particular code. RAPIDS-Triton includes a header
which has some placeholders for CUDA symbols used internally by the library,
and which may be useful for backends which implement CPU-only builds as well.
Note that all placeholder symbols are namespaced within
`triton::backend::rapids`. Not all symbols from the CUDA runtime API are
included, but additional symbols will be added over time. All placeholder
symbols will be implemented in a way that is consistent with similar
placeholders in the main Triton codebase. A typical usage is shown below:
```cpp
#ifdef TRITON_ENABLE_GPU
#include <cuda_runtime_api.h>
#else
#include <rapids_triton/cpu_only/cuda_runtime_replacement.hpp>
#endif
// E.g. cudaStream_t is now defined regardless of whether or not this is a
// CPU-only build.
```
## Buffers
Within a backend, it is often useful to process data in a way that is agnostic
to whether the underlying memory is on the host or on device and whether that
memory is owned by the backend or provided by Triton. For instance, a backend
may receive input data from Triton on the host and conditionally transfer it to
the GPU before processing. In this case, owned memory must be allocated on the
GPU to store the data, but after that point, the backend will treat the data
exactly the same as if Triton had provided it on device in the first place.
In order to handle such situations, RAPIDS-Triton provides the `Buffer` object.
When the `Buffer` is non-owning, it provides a lightweight wrapper to the
underlying memory. When it is owning, `Buffer` will handle any necessary
deallocation (on host or device). These objects can also be extremely useful
for passing data back and forth between host and device. The following examples
show ways in which `Buffer` objects can be constructed and used:
```cpp
#include <utility>
#include <vector>
#include <rapids_triton/memory/types.hpp> // rapids::HostMemory and rapids::DeviceMemory
#include <rapids_triton/memory/buffer.hpp> // rapids::Buffer
void buffer_examples() {
auto data = std::vector<int>{0, 1, 2, 3, 4};
// This buffer is a lightweight wrapper around the data stored in the `data`
// vector. Because this constructor takes an `int*` pointer as its first
// argument, it is assumed that the lifecycle of the underlying memory is
// separately managed.
auto non_owning_host_buffer = rapids::Buffer<int>(data.data(), data.size(), rapids::HostMemory);
// This buffer owns its own memory on the host, with space for 5 ints. When
// it goes out of scope, the memory will be appropriately deallocated.
auto owning_host_buffer = rapids::Buffer<int>(5, rapids::HostMemory);
// This buffer is constructed as a copy of `non_owning_host_buffer`. Because
// its requested memory type is `DeviceMemory`, the data will be copied to a
// new (owned) GPU allocation. Device and stream can also be specified in the
// constructor.
auto owning_device_buffer = rapids::Buffer<int>(non_owning_host_buffer, rapids::DeviceMemory);
// Once again, because this constructor takes an `int*` pointer, it will
// simply be a lightweight wrapper around the memory that is actually managed
// by `owning_device_buffer`. Here we have omitted the memory type argument,
// since it defaults to `DeviceMemory`. This constructor can also accept
// device and stream arguments, and care should be taken to ensure that the
// right device is specified when the buffer does not allocate its own
// memory.
auto non_owning_device_buffer = rapids::Buffer<int>(owning_device_buffer.data(), owning_device_buffer.size());
auto base_buffer1 = rapids::Buffer<int>(data.data(), rapids::HostMemory);
// Because this buffer is on the host, just like the (moved-from) buffer it
// is being constructed from, it remains non-owning
auto non_owning_moved_buffer = rapids::Buffer<int>(std::move(base_buffer1), rapids::HostMemory);
auto base_buffer2 = rapids::Buffer<int>(data.data(), rapids::HostMemory);
// Because this buffer is on the device, unlike the (moved-from) buffer it is
// being constructed from, memory must be allocated on-device, and the new
// buffer becomes owning.
auto owning_moved_buffer = rapids::Buffer<int>(std::move(base_buffer2), rapids::DeviceMemory);
}
```
### Useful Methods
* `data()`: Return a raw pointer to the buffer's data
* `size()`: Return the number of elements contained by the buffer
* `mem_type()`: Return the type of memory (`HostMemory` or `DeviceMemory`)
contained by the buffer
* `device()`: Return the id of the device on which this buffer resides (always
0 for host buffers)
* `stream()`: Return the CUDA stream associated with this buffer.
* `stream_synchronize()`: Perform a stream synchronization on this buffer's
stream.
* `set_stream(cudaStream_t new_stream)`: Synchronize on the current stream and
then switch buffer to the new stream.
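A brief sketch of these methods in use (the buffer and stream passed in here are hypothetical placeholders):
```cpp
void buffer_methods_example(rapids::Buffer<float>& buffer, cudaStream_t new_stream) {
  auto* raw   = buffer.data();       // raw pointer to the underlying memory
  auto  count = buffer.size();       // number of elements in the buffer
  if (buffer.mem_type() == rapids::DeviceMemory) {
    buffer.set_stream(new_stream);   // synchronize on the old stream, then switch
    buffer.stream_synchronize();     // wait for outstanding work on the new stream
  }
}
```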
## Tensors
`Tensor` objects are wrappers around `Buffers` with some additional metadata
and functionality. All `Tensor` objects have a shape which can be retrieved as
a `std::vector` using the `shape()` method. A reference to the underlying
buffer can also be retrieved with the `buffer()` method.
`OutputTensor` objects are used to store data which will eventually be returned
as part of Triton's response to a client request. Their `finalize` methods are
used to actually marshal their underlying data into a response.
In general, `OutputTensor` objects should not be constructed directly but
should instead be retrieved using the `get_output` method of a `Model`
(described later).
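As a minimal sketch (mirroring the constructors exercised in the tensor unit tests earlier in this repository), a host tensor can be built directly from a `Buffer` and inspected as follows:
```cpp
#include <rapids_triton/memory/buffer.hpp>
#include <rapids_triton/tensor/tensor.hpp>
#include <vector>

void tensor_example() {
  // A 2x2 host tensor backed by an owning host buffer with room for 4 ints.
  auto shape  = std::vector<std::size_t>{2, 2};
  auto tensor = rapids::Tensor<int>(shape, rapids::Buffer<int>{std::size_t{4}, rapids::HostMemory});
  auto dims   = tensor.shape();    // {2, 2}
  auto& buf   = tensor.buffer();   // reference to the underlying Buffer<int>
}
```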
## Moving Data: `rapids::copy`
Moving data around between host and device or simply between buffers of the
same type can be one of the more error-prone tasks outside of actual model
execution in a backend. To help make this process easier, RAPIDS-Triton
provides a number of overrides of the `rapids::copy` function, which provides a
safe way to move data between buffers or tensors. Assuming the size attribute
of the buffer or tensor has not been corrupted, `rapids::copy` should never
result in segfaults or invalid memory access on device.
Additional overrides of `rapids::copy` exist, but we will describe the most
common uses of it here. Note that you need not worry about where the underlying
data is located (on host or device) when invoking `rapids::copy`. The function
will take care of detecting and handling this. `Tensor` overrides are in
`rapids_triton/tensor/tensor.hpp` and `Buffer` overrides are in
`rapids_triton/memory/buffer.hpp`.
### Between two buffers or tensors...
If you wish to simply copy the entire contents of one buffer into another or
one tensor into another, `rapids::copy` can be invoked as follows:
```cpp
rapids::copy(destination_buffer, source_buffer);
rapids::copy(destination_tensor, source_tensor);
```
If the destination is too small to contain the data from the source, a
`TritonException` will be thrown.
### From one tensor to many...
To distribute data from one tensor to many, the following override is
available:
```cpp
rapids::copy(iterator_to_first_destination, iterator_to_last_destination, source);
```
Note that destination tensors can be of different sizes. If the destination
buffers cannot contain all data from the source, a `TritonException` will be
thrown. Destination tensors can also be a mixture of device and host tensors if
desired.
### From part of one buffer to part of another...
To move data from part of one buffer to part of another, you can use another
override as in the following example:
```cpp
rapids::copy(destination_buffer, source_buffer, 10, 3, 6);
```
The extra arguments here provide the offset from the beginning of the
destination buffer to which data should be copied, the index of the first
element to be copied from the source, and the index one past the final element to
be copied from the source. Thus, this invocation copies the elements at indices
3, 4, and 5 of the source buffer to indices 10, 11, and 12 of the destination.
If the destination buffer only had room for (e.g.) eleven elements, a
`TritonException` would be thrown.
## `Model`
For a thorough introduction to developing a RAPIDS-Triton `Model` for your
backend, see the [Linear Example
repo](https://github.com/rapidsai/rapids-triton-linear-example). Here, we will
just briefly summarize some of the useful methods of `Model` objects.
### Non-Virtual Methods
* `get_input`: Used to retrieve an input tensor of a particular name from
Triton
* `get_output`: Used to retrieve an output tensor of a particular name from
Triton
* `get_config_param`: Used to retrieve a named parameter from the configuration
file for this model
* `get_device_id`: The device on which this model is deployed (0 for host
deployments)
* `get_deployment_type`: One of `GPUDeployment` or `CPUDeployment` depending on
whether this model is configured to be deployed on device or host
### Virtual Methods
* `predict`: The method which performs actual inference on input data and
stores it to the output location
* `load`: A method which can be overridden to load resources that will be used
for the lifetime of the model
* `unload`: A method used to unload any resources loaded in `load` if
necessary
* `preferred_mem_type`, `preferred_mem_type_in`, and `preferred_mem_type_out`:
The location (device or host) where input and output data should be stored.
The latter two methods can be overridden if input and output data should be
stored differently. Otherwise, `preferred_mem_type` will be used for both.
* `get_stream`: A method which can be overridden to provide different streams
for handling successive batches. Otherwise, the default stream associated
with this model will be used.
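The hypothetical skeleton below sketches how these methods fit together for an identity-style model. The base class spelling, the shared-state template parameter, and the exact `get_input`/`get_output` signatures are assumptions based on the rapids-triton-template and Linear Example repos, not guarantees from this document:
```cpp
#include <rapids_triton/batch/batch.hpp>
// Assumed umbrella header providing rapids::Model; see the template repo for
// the authoritative include and base-class definition.
#include <rapids_triton/rapids_triton.hpp>

// Assumed: rapids::Model is templated on a shared-state type and predict()
// receives the current batch, as in the Linear Example.
struct IdentityModel : rapids::Model<rapids::SharedModelState> {
  void predict(rapids::Batch& batch) const {
    auto input  = get_input<float>(batch, "input__0");   // tensor names are illustrative
    auto output = get_output<float>(batch, "output__0");
    rapids::copy(output, input);   // identity: copy the input directly to the output tensor
    output.finalize();             // marshal the output into the Triton response
  }
};
```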
## `SharedState`
Multiple instances of a RAPIDS-Triton model may need to share some data between
them (or may choose to do so for efficiency). `SharedState` objects facilitate
this. For a thorough introduction to developing a RAPIDS-Triton `SharedState`
for your backend, see the [Linear Example
repo](https://github.com/rapidsai/rapids-triton-linear-example). Just like the
`Model` objects which share a particular `SharedState` object, configuration
parameters can be retrieved using `SharedState`'s `get_config_param` method.
Otherwise, most additional functionality is defined by the backend
implementation, including `load` and `unload` methods for any necessary
loading/unloading of resources that will be used for the lifetime of the shared
state.
Note that just one shared state is constructed by the server regardless of how
many instances of a given model are created.
## Other Memory Allocations
For most device memory allocations, it is strongly recommended that you simply
construct a `Buffer` of the correct size and type. However, if you absolutely
cannot use a `Buffer` in a particular context, you are encouraged to allocate
and deallocate device memory using [RMM](https://github.com/rapidsai/rmm). Any
memory managed in this way will make use of Triton's CUDA memory pool, which
will be faster than performing individual allocations. It is strongly
recommended that you not change the RMM device resource in your backend, since
doing so will cause allocations to no longer make use of Triton's memory pool.
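Under those constraints, a minimal sketch of such an allocation (assuming a CUDA stream is already in hand) might look like this:
```cpp
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>

void rmm_allocation_example(cudaStream_t stream) {
  // 256 bytes allocated through the current RMM device resource (which Triton
  // points at its CUDA memory pool); freed automatically at end of scope.
  auto scratch = rmm::device_buffer{256, rmm::cuda_stream_view{stream}};
}
```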
| 0 |
rapidsai_public_repos/rapids-triton/ci | rapidsai_public_repos/rapids-triton/ci/local/build.sh | #!/bin/bash
set -e
REPODIR=$(cd $(dirname $0)/../../; pwd)
EXAMPLE_TAG=rapids_triton_identity \
TEST_TAG=rapids_triton_identity_test \
$REPODIR/build.sh
if [ -z $CUDA_VISIBLE_DEVICES ]
then
docker run -v "${REPODIR}/qa/logs:/qa/logs" --gpus all --rm rapids_triton_identity_test
else
docker run -v "${REPODIR}/qa/logs:/qa/logs" --gpus $CUDA_VISIBLE_DEVICES --rm rapids_triton_identity_test
fi
EXAMPLE_TAG=rapids_triton_identity:cpu \
TEST_TAG=rapids_triton_identity_test:cpu \
$REPODIR/build.sh --cpu-only
docker run -v "${REPODIR}/qa/logs:/qa/logs" --gpus all --rm rapids_triton_identity_test:cpu
| 0 |
rapidsai_public_repos/rapids-triton | rapidsai_public_repos/rapids-triton/qa/entrypoint.sh | #!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
QA_DIR=$(cd $(dirname $0); pwd)
TEST_SCRIPT="$QA_DIR/run_tests.sh"
if [[ $TRITON_ENABLE_GPU != "OFF" ]]
then
MODEL_REPO="${QA_DIR}/L0_e2e/model_repository" "$TEST_SCRIPT"
export TEST_EXE='' # Only run unit tests once
MODEL_REPO="${QA_DIR}/L0_e2e/cpu_model_repository" "$TEST_SCRIPT"
fi
CPU_ONLY=1 MODEL_REPO="${QA_DIR}/L0_e2e/cpu_model_repository" "$TEST_SCRIPT"
| 0 |
rapidsai_public_repos/rapids-triton | rapidsai_public_repos/rapids-triton/qa/run_tests.sh | #!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
QA_DIR=$(cd $(dirname $0); pwd)
SERVER_ARGS=""
UUID="$(cat /proc/sys/kernel/random/uuid)"
CONTAINER_NAME="rapids_triton-ci-$UUID"
DOCKER_RUN=0
DOCKER_ARGS="-d -p 8000:8000 -p 8001:8001 -p 8002:8002 --name ${CONTAINER_NAME}"
TRITON_PID=""
LOG_DIR="${QA_DIR}/logs"
SERVER_LOG="${LOG_DIR}/${UUID}-server.log"
if [ ! -d "${LOG_DIR}" ]
then
mkdir -p "${LOG_DIR}"
fi
if [ -z $MODEL_REPO ]
then
MODEL_REPO="${QA_DIR}/L0_e2e/model_repository"
fi
MODEL_REPO="$(readlink -f $MODEL_REPO)"
DOCKER_ARGS="${DOCKER_ARGS} -v ${MODEL_REPO}:/models"
if [ -z $CPU_ONLY ] || [ $CPU_ONLY -eq 0 ]
then
if [[ -v CUDA_VISIBLE_DEVICES ]]
then
if [ -z $CUDA_VISIBLE_DEVICES ]
then
CPU_ONLY=1
else
DOCKER_ARGS="${DOCKER_ARGS} --gpus ${CUDA_VISIBLE_DEVICES}"
fi
else
DOCKER_ARGS="${DOCKER_ARGS} --gpus all"
fi
else
export CUDA_VISIBLE_DEVICES=""
fi
# If a Triton Docker image has been provided or no tritonserver executable is
# available, run the server via Docker
if [ ! -z $TRITON_IMAGE ] || ! command -v tritonserver
then
DOCKER_RUN=1
TRITON_IMAGE=${TRITON_IMAGE:-rapids_triton_identity}
SERVER_ARGS="${SERVER_ARGS} --model-repository=/models"
else
SERVER_ARGS="${SERVER_ARGS} --model-repository=${MODEL_REPO}"
fi
start_server() {
if [ $DOCKER_RUN -eq 1 ]
then
docker run $DOCKER_ARGS $TRITON_IMAGE > /dev/null
else
tritonserver $SERVER_ARGS > $SERVER_LOG 2>&1 &
TRITON_PID="$!"
fi
}
start_server
if [ -z $TEST_EXE ]
then
echo 'No TEST_EXE variable defined; skipping unit tests'
else
if [ $DOCKER_RUN -eq 1 ]
then
docker exec $CONTAINER_NAME "$TEST_EXE"
else
"$TEST_EXE"
fi
fi
finally() {
if [ -z $TRITON_PID ]
then
docker logs $CONTAINER_NAME > $SERVER_LOG 2>&1
docker rm -f $CONTAINER_NAME > /dev/null 2>&1
else
kill -15 $TRITON_PID
wait
fi
}
trap finally EXIT
pytest "$QA_DIR"
| 0 |
rapidsai_public_repos/rapids-triton/qa | rapidsai_public_repos/rapids-triton/qa/L0_e2e/test_model.py | # Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent
import os
import numpy as np
import pytest
from rapids_triton import Client
from rapids_triton.testing import get_random_seed, arrays_close
TOTAL_SAMPLES = 8192
def valid_shm_modes():
modes = [None]
if int(os.environ.get('CPU_ONLY', 0)) == 0:
modes.append('cuda')
return modes
@pytest.fixture(scope='session')
def model_versions():
return (1, 2)
@pytest.fixture(scope='session')
def client():
client = Client()
client.wait_for_server(60)
return client
@pytest.fixture
def model_inputs():
np.random.seed(get_random_seed())
return {
input_name:
np.random.rand(TOTAL_SAMPLES, 1).astype('float32')
for input_name in ('input__0',)
}
@pytest.fixture
def model_output_sizes():
return {'output__0': TOTAL_SAMPLES * np.dtype('float32').itemsize}
def get_ground_truth(inputs):
return {'output__0': inputs['input__0']}
@pytest.mark.parametrize("model_name", ['identity'])
@pytest.mark.parametrize("shared_mem", valid_shm_modes())
def test_model(client, model_name, shared_mem, model_inputs, model_output_sizes):
result = client.predict(
model_name, model_inputs, model_output_sizes, shared_mem=shared_mem
)
ground_truth = get_ground_truth(model_inputs)
for output_name in sorted(ground_truth.keys()):
arrays_close(
result[output_name],
ground_truth[output_name],
atol=1e-5,
assert_close=True
)
@pytest.mark.parametrize("model_name", ['identity'])
@pytest.mark.parametrize("shared_mem", valid_shm_modes())
@pytest.mark.parametrize(
"batch_size",
[1, TOTAL_SAMPLES // 3, TOTAL_SAMPLES // 2]
)
def test_predict_async(client, model_name, shared_mem, model_inputs, batch_size):
results = []
gt_results = []
for i in range(
0,
TOTAL_SAMPLES // batch_size + int(bool(TOTAL_SAMPLES % batch_size))
):
min_index = i * batch_size
max_index = min((i + 1) * batch_size, TOTAL_SAMPLES)
cur_input = {name: arr[min_index: max_index] for name, arr in
model_inputs.items()}
cur_output_size = {
'output__0': (max_index - min_index) * np.dtype('float32').itemsize
}
results.append(client.predict_async(
model_name, cur_input, cur_output_size, shared_mem=shared_mem
))
gt_results.append(get_ground_truth(cur_input))
concurrent.futures.wait(results, timeout=60)
results = [result_.result() for result_ in results]
for result, ground_truth in zip(results, gt_results):
for output_name in sorted(ground_truth.keys()):
arrays_close(
result[output_name],
ground_truth[output_name],
atol=1e-5,
assert_close=True
)
@pytest.mark.parametrize("model_name", ['identity'])
@pytest.mark.parametrize("shared_mem", valid_shm_modes())
def test_predict_multimodel_async(
client, model_name, shared_mem, model_inputs, model_output_sizes,
model_versions):
all_results = client.predict_multimodel_async(
[model_name], model_inputs, model_output_sizes,
model_versions=model_versions, shared_mem=shared_mem
)
ground_truth = get_ground_truth(model_inputs)
all_results = all_results.result(timeout=60)
for result in all_results:
for output_name in sorted(ground_truth.keys()):
arrays_close(
result.output[output_name],
ground_truth[output_name],
atol=1e-5,
assert_close=True
)
| 0 |
rapidsai_public_repos/rapids-triton/qa/L0_e2e/cpu_model_repository | rapidsai_public_repos/rapids-triton/qa/L0_e2e/cpu_model_repository/identity/config.pbtxt | backend: "rapids-identity"
max_batch_size: 32768
input [
{
name: "input__0"
data_type: TYPE_FP32
dims: [ 1 ]
}
]
output [
{
name: "output__0"
data_type: TYPE_FP32
dims: [ 1 ]
}
]
version_policy: { all { }}
instance_group [{ kind: KIND_CPU }]
parameters [ ]
dynamic_batching {
max_queue_delay_microseconds: 50
}
| 0 |
rapidsai_public_repos/rapids-triton/qa/L0_e2e/model_repository | rapidsai_public_repos/rapids-triton/qa/L0_e2e/model_repository/identity/config.pbtxt | backend: "rapids-identity"
max_batch_size: 32768
input [
{
name: "input__0"
data_type: TYPE_FP32
dims: [ 1 ]
}
]
output [
{
name: "output__0"
data_type: TYPE_FP32
dims: [ 1 ]
}
]
version_policy: { all { }}
instance_group [{ kind: KIND_GPU }]
parameters [ ]
dynamic_batching {
max_queue_delay_microseconds: 50
}
| 0 |
rapidsai_public_repos | rapidsai_public_repos/deeplearning/README.md | ### RAPIDS.AI Deep Learning Repo
This repository is the home of our efforts to integrate RAPIDS acceleration of dataframes on GPU into popular deep learning frameworks. The work can be broken down into three main sections:
- Dataloaders and preprocessing functionality developed to help provide connectivity between RAPIDS cuDF dataframes and the different deep learning libraries available.
- Improvements to optimizers through the fusion of GPU operations.
- Examples of the use of each of the above in competitions or on real world datasets.
Each deep learning library is contained within its own subfolder, with the different dataloader options and examples contained within further subfolders. For now our focus is on PyTorch; however, we expect to add other libraries in the future.
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2019/README.md | ## Accelerating Recommender Systems by 15x with RAPIDS (Source Code)
This content was moved to a new [competition repository](https://github.com/NVIDIA-Merlin/competitions).
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/04_2_Normalization.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE

import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_test['cat_0'] = df_test['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_test['cat_1'] = df_test['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')

def target_encode(train, valid, col, target, kfold=5, smooth=20, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)

df_train.columns

cats = [['cat_0'], ['cat_1'], ['cat_2'], ['cat_0', 'cat_1', 'cat_2'], ['ts_hour'], ['ts_weekday'], ['ts_weekday', 'ts_hour', 'cat_2', 'brand']]

for cat in cats:
    df_train, df_valid = target_encode(df_train, df_valid, cat, 'target')

cats = ['brand', 'user_id', 'product_id', 'cat_0', 'cat_1', 'cat_2']

def count_encode(train, valid, col, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be count encoded (in the example RESOURCE)
"""
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
train_tmp = train[col].value_counts().reset_index()
train_tmp.columns = [col, 'CE_' + col]
df_tmp = train[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
train['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp = valid[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)

%%time
for cat in cats:
    df_train, df_valid = count_encode(df_train, df_valid, cat, gpu=True)

df_train.head()

df_train.columns

X = df_train['CE_product_id']

X_norm = (X-X.mean())/X.std()

fig, axs = plt.subplots(1, 2, figsize=(16,3))
axs[0].hist(X.sample(frac=0.01).to_pandas(), bins=50)
axs[0].set_title('Histogram non-normalised')
axs[1].hist(X_norm.sample(frac=0.01).to_pandas(), bins=50)
axs[1].set_title('Histogram normalised')

X = df_train['CE_product_id'].to_pandas()

X_log = np.log(X+1)

X_norm = (X_log-X_log.mean())/X_log.std()

fig, axs = plt.subplots(1, 2, figsize=(16,3))
axs[0].hist(X.sample(frac=0.01), bins=50)
axs[0].set_title('Histogram non-normalised')
axs[1].hist(X_norm.sample(frac=0.01), bins=50)
axs[1].set_title('Histogram normalised')

X = df_train['TE_cat_2']

plt.hist(((X-X.min())/(X.max()-X.min())).sample(frac=0.01).to_pandas(), bins=50)

### ToDo

app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/01_1_Exploring_DataSet.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport cudfimport warnings
warnings.filterwarnings("ignore")import IPython
import cudf
import pandas as pd
import matplotlib.pyplot as plt
df_train = pd.read_parquet('./data/train.parquet')
df_valid = pd.read_parquet('./data/valid.parquet')
df_test = pd.read_parquet('./data/test.parquet')
df_train.shape, df_valid.shape, df_test.shape
df = pd.concat([df_train, df_valid, df_test],ignore_index=True)
df.shape
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.head()
df.target.mean()
df['event_type'].value_counts(normalize=True)
print('# of datapoints:' + str(df.shape))
print('# of unique users:' + str(df['user_id'].drop_duplicates().shape))
print('# of unique products:' + str(df['product_id'].drop_duplicates().shape))
print('# of unique sessions:' + str(df['user_session'].drop_duplicates().shape))
def plot_sparse(df, col):
stats = df[[col, 'target']].groupby(col).agg(['count', 'mean', 'sum'])
stats = stats.reset_index()
stats.columns = [col, 'count', 'mean', 'sum']
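    # frequency-of-frequencies: count how many distinct values of `col` occur exactly 1x, 2x, ... in the data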
stats_sort = stats['count'].value_counts().reset_index()
stats_sort = stats_sort.sort_values('index')
plt.figure(figsize=(15,4))
plt.bar(stats_sort['index'].astype(str).values[0:20], stats_sort['count'].values[0:20])
plt.title('Frequency of ' + str(col))
plt.xlabel('Number frequency')
plt.ylabel('Frequency')plot_sparse(df, 'product_id')plot_sparse(df, 'user_id')plot_sparse(df, 'brand')plot_sparse(df, 'cat_0')plot_sparse(df, 'cat_1')plot_sparse(df, 'cat_2')def plot_top20(df, col):
stats = df[[col, 'target']].groupby(col).agg(['count', 'mean', 'sum'])
stats = stats.reset_index()
stats.columns = [col, 'count', 'mean', 'sum']
stats = stats.sort_values('count', ascending=False)
fig, ax1 = plt.subplots(figsize=(15,4))
ax2 = ax1.twinx()
ax1.bar(stats[col].astype(str).values[0:20], stats['count'].values[0:20])
ax1.set_xticklabels(stats[col].astype(str).values[0:20], rotation='vertical')
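    # overlay the mean target (conversion rate) of each of the top-20 most frequent categories on a secondary y-axis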
ax2.plot(stats['mean'].values[0:20], color='red')
ax2.set_ylim(0,1)
ax2.set_ylabel('Mean Target')
ax1.set_ylabel('Frequency')
ax1.set_xlabel(col)
ax1.set_title('Top20 ' + col + 's based on frequency')plot_top20(df, 'product_id')plot_top20(df, 'user_id')plot_top20(df, 'brand')plot_top20(df, 'cat_0')plot_top20(df, 'cat_1')plot_top20(df, 'cat_2')df['date'] = pd.to_datetime(df['timestamp']).dt.dateplt.figure(figsize=(15,4))
plt.plot(df[['date', 'target']].groupby('date').target.mean())
plt.ylabel('average mean')
plt.xlabel('date')
plt.xticks(df[['date', 'target']].groupby('date').target.mean().index[::3], rotation='vertical')
print('')df[['date', 'target']].groupby('date').target.mean().sort_values().head(20)app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/06_1_Intro_Dask.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport dask
from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
client = Client(n_workers=8,
threads_per_worker=1,
memory_limit='50GB',
ip='127.0.0.1')
client
%%time
ddf_train = dd.read_parquet('./data/train.parquet', blocksize=12e3)
ddf_valid = dd.read_parquet('./data/valid.parquet', blocksize=12e3)
ddf_train
ddf_train._meta
%%time
ddf_train['cat_2_brand'] = ddf_train['cat_2'].astype(str) + '_' + ddf_train['brand'].astype(str)
ddf_valid['cat_2_brand'] = ddf_valid['cat_2'].astype(str) + '_' + ddf_valid['brand'].astype(str)
ddf_train_group = ddf_train[['cat_2_brand', 'target']].groupby(['cat_2_brand']).agg(['count', 'mean'])
ddf_train_group = ddf_train_group.reset_index()
ddf_train_group.columns = ['cat_2_brand', 'TE_count', 'TE_mean']
ddf_train = ddf_train.merge(ddf_train_group, how='left', on='cat_2_brand')
ddf_valid = ddf_valid.merge(ddf_train_group, how='left', on='cat_2_brand')
global_mean = ddf_train['target'].mean()
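# Target-encoding values backed by fewer than 20 observations fall back to the global target mean (the .where calls below).
# Note: up to this point Dask has only built a lazy task graph; no work is executed until compute() is called.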
ddf_train['TE_mean'] = ddf_train.TE_mean.where(ddf_train['TE_count']>20, global_mean)
ddf_valid['TE_mean'] = ddf_valid.TE_mean.where(ddf_valid['TE_count']>20, global_mean)
%%time
ddf_train.compute()
ddf_valid.compute()
client.close()
import dask as dask, dask_cudf
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
cluster = LocalCUDACluster(ip='127.0.0.1',
rmm_pool_size="16GB")
client = Client(cluster)
client
%%time
ddf_train = dask_cudf.read_parquet('./data/train.parquet')
ddf_valid = dask_cudf.read_parquet('./data/valid.parquet')
%%time
ddf_train['cat_2_brand'] = ddf_train['cat_2'].astype(str) + '_' + ddf_train['brand'].astype(str)
ddf_valid['cat_2_brand'] = ddf_valid['cat_2'].astype(str) + '_' + ddf_valid['brand'].astype(str)
ddf_train_group = ddf_train[['cat_2_brand', 'target']].groupby(['cat_2_brand']).agg(['count', 'mean'])
ddf_train_group = ddf_train_group.reset_index()
ddf_train_group.columns = ['cat_2_brand', 'TE_count', 'TE_mean']
ddf_train = ddf_train.merge(ddf_train_group, how='left', on='cat_2_brand')
ddf_valid = ddf_valid.merge(ddf_train_group, how='left', on='cat_2_brand')
global_mean = ddf_train['target'].mean()
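# Same lazy pattern as above, but with dask_cudf each partition is a cuDF DataFrame, so the work runs on the GPU.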
ddf_train['TE_mean'] = ddf_train.TE_mean.where(ddf_train['TE_count']>20, global_mean)
ddf_valid['TE_mean'] = ddf_valid.TE_mean.where(ddf_valid['TE_count']>20, global_mean)
%%time
ddf_train.compute()
ddf_valid.compute()client.close() | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/03_4_CountEncoding.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')df_train.head()cat = 'product_id'ce = df_train[cat].value_counts()cece = ce.reset_index()ce.columns = [cat, 'CE_' + cat]
df_train.merge(ce, how='left', left_on=cat, right_on=cat)ce = df_train[['cat_2', 'brand', 'target']].groupby(['cat_2', 'brand']).agg(['count'])cece = ce.reset_index()
ce.columns = ['cat_2', 'brand', 'CE_cat_2_brand']
df_train.merge(ce, how='left', left_on=['cat_2', 'brand'], right_on=['cat_2', 'brand'])col = 'user_id'### ToDo############### Solution ############################## Solution End ###########app = IPython.Application.instance()
app.kernel.do_shutdown(True)import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')df_train_pd = df_train.to_pandas()
df_valid_pd = df_valid.to_pandas()def count_encode(train, valid, col, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be count encoded (in the example RESOURCE)
"""
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
train_tmp = train[col].value_counts().reset_index()
train_tmp.columns = [col, 'CE_' + col]
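    # merge the per-category counts back onto the original rows; sorting by 'org_sorting'
    # restores the row order that the merge would otherwise shuffle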
df_tmp = train[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
train['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp = valid[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)%%time
df_train_pd, df_valid_pd = count_encode(df_train_pd, df_valid_pd, 'user_id', gpu=False)%%time
df_train, df_valid = count_encode(df_train, df_valid, 'user_id', gpu=True)app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/03_3_TargetEncoding.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')df_train.head()cat = 'brand'te = df_train[[cat, 'target']].groupby(cat).mean()tete = te.reset_index()
te.columns = [cat, 'TE_' + cat]
df_train.merge(te, how='left', on=cat)te = df_train[['brand', 'cat_2', 'target']].groupby(['brand', 'cat_2']).mean()tete = te.reset_index()
te.columns = ['brand', 'cat_2', 'TE_brand_cat_2']
df_train.merge(te, how='left', left_on=['brand', 'cat_2'], right_on=['brand', 'cat_2'])df_train[[cat, 'target']].groupby(cat).agg(['mean', 'count'])dd = df_train[[cat, 'target']].groupby(cat).agg(['mean', 'count']).reset_index()['target']['count']plt.bar(dd.groupby('count').count().index.to_array(), dd.groupby('count').count().to_array())
plt.xlim(0,50)### ToDocat = ['ts_weekday', 'ts_hour', 'cat_2', 'brand']
te = df_train.groupby(cat).target.agg(['mean', 'count']).reset_index()
te.columns = cat + ['TE_mean', 'TE_count']df_valid = df_valid.merge(te, on=cat, how='left')
df_valid['error'] = (df_valid['target'] - (df_valid['TE_mean']>=0.5)).abs()mean_global = df_train.target.mean()
df_valid['TE_mean'] = df_valid['TE_mean'].fillna(mean_global)
w = 20
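# smoothed target encoding: TE = (count * category_mean + w * global_mean) / (count + w),
# so categories with few observations are pulled towards the global mean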
df_valid['TE_mean_smoothed'] = ((df_valid['TE_mean']*df_valid['TE_count'])+(mean_global*w))/(df_valid['TE_count']+w)
df_valid['TE_mean_smoothed'] = df_valid['TE_mean_smoothed'].fillna(mean_global)df_valid['error_smoothed'] = (df_valid['target'] - (df_valid['TE_mean_smoothed']>=0.5)).abs()df_valid[['TE_count', 'error']].groupby('TE_count').error.mean()df_valid[['TE_count', 'error_smoothed']].groupby('TE_count').error_smoothed.mean()from sklearn.metrics import roc_auc_scoreroc_auc_score(df_valid['target'].to_pandas().astype(int).values,
df_valid['TE_mean'].to_pandas().values)roc_auc_score(df_valid['target'].to_pandas().astype(int).values,
df_valid['TE_mean_smoothed'].to_pandas().values)app = IPython.Application.instance()
app.kernel.do_shutdown(True)import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')def target_encode(train, valid, col, target, kfold=5, smooth=20):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)%%time
df_train, df_valid = target_encode(df_train, df_valid, ['ts_weekday', 'ts_hour', 'cat_2', 'brand'], 'target')df_train.head()df_valid.head()app = IPython.Application.instance()
app.kernel.do_shutdown(True)import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')def target_encode(train, valid, col, target, kfold=5, smooth=20, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)df_train_pd = df_train.to_pandas()
df_valid_pd = df_valid.to_pandas()%%time
df_train_pd, df_valid_pd = target_encode(df_train_pd, df_valid_pd, ['ts_weekday', 'ts_hour', 'cat_2', 'brand'], 'target', gpu=False)%%time
df_train, df_valid = target_encode(df_train, df_valid, ['ts_weekday', 'ts_hour', 'cat_2', 'brand'], 'target')app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/03_2_Categorify.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')df_train.head()cat = 'product_id'df_train[cat].unique()codes, uniques = df_train[cat].factorize()codescodes.unique()import hashlib
from sys import getsizeofhashlib.md5(b'0').hexdigest()hashSeries = df_train[cat].to_pandas().apply(lambda x: hashlib.md5(bytes(str(x), encoding='utf-8')).hexdigest())hashSeriesgetsizeof(hashSeries)codes, uniques = hashSeries.factorize()getsizeof(pd.DataFrame(codes)[0])91691016/1020060933df_train[cat].value_counts()freq = df_train[cat].value_counts()freq = freq.reset_index()
freq.columns = [cat, 'count']
freq = freq.reset_index()
freq.columns = [cat + '_Categorify', cat, 'count']
freq_filtered = freq[freq['count']>5]
freq_filtered[cat + '_Categorify'] = freq_filtered[cat + '_Categorify']+1
freq_filtered = freq_filtered.drop('count', axis=1)
df_train = df_train.merge(freq_filtered, how='left', on=cat)
df_train[cat + '_Categorify'] = df_train[cat + '_Categorify'].fillna(0)df_train['product_id_Categorify'].min(), df_train['product_id_Categorify'].max(), df_train['product_id_Categorify'].drop_duplicates().shapedf_valid = df_valid.merge(freq_filtered, how='left', on=cat)
df_valid[cat + '_Categorify'] = df_valid[cat + '_Categorify'].fillna(0)
df_test = df_test.merge(freq_filtered, how='left', on=cat)
df_test[cat + '_Categorify'] = df_test[cat + '_Categorify'].fillna(0)### ToDo############### Solution ############################## Solution End ###########def categorify(df_train, df_valid, df_test, cat, freq_treshhold=20, unkown_id=1, lowfrequency_id=0):
freq = df_train[cat].value_counts()
freq = freq.reset_index()
freq.columns = [cat, 'count']
freq = freq.reset_index()
freq.columns = [cat + '_Categorify', cat, 'count']
freq[cat + '_Categorify'] = freq[cat + '_Categorify']+2
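    # the +2 shift keeps ids 0 and 1 reserved: 0 = low-frequency categories (count below freq_treshhold),
    # 1 = categories unseen in the training data (filled in via fillna below)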
freq.loc[freq['count']<freq_treshhold, cat + '_Categorify'] = lowfrequency_id
freq = freq.drop('count', axis=1)
df_train = df_train.merge(freq, how='left', on=cat)
df_train[cat + '_Categorify'] = df_train[cat + '_Categorify'].fillna(unkown_id)
df_valid = df_valid.merge(freq, how='left', on=cat)
df_valid[cat + '_Categorify'] = df_valid[cat + '_Categorify'].fillna(unkown_id)
df_test = df_test.merge(freq, how='left', on=cat)
df_test[cat + '_Categorify'] = df_test[cat + '_Categorify'].fillna(unkown_id)df_train_pd = df_train.to_pandas()
df_valid_pd = df_valid.to_pandas()
df_test_pd = df_test.to_pandas()%%time
categorify(df_train_pd, df_valid_pd, df_test_pd, 'user_id')%%time
categorify(df_train, df_valid, df_test, 'user_id')app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/README.md | # RecSys2020 Tutorial: Feature Engineering for Recommender Systems
by Chris Deotte (NVIDIA), Benedikt Schifferer (NVIDIA) and Even Oldridge (NVIDIA)
### Content
The selection of features and proper preparation of data for deep learning or machine learning models plays a significant role in the performance of recommender systems. To address this we propose a tutorial highlighting best practices and optimization techniques for feature engineering and preprocessing of recommender system datasets. The tutorial will explore feature engineering using pandas and Dask, and will also cover acceleration on the GPU using open source libraries like RAPIDS and NVTabular. The proposed length is 180 minutes. We've designed the tutorial as a combination of a lecture covering the mathematical and theoretical background and an interactive session based on Jupyter notebooks. Participants will practice the discussed features by writing their own implementation in Python. NVIDIA will host the tutorial on its infrastructure, providing the dataset, Jupyter notebooks and GPUs. Participants will be able to easily attend the tutorial via their web browsers, avoiding any complicated setup.
The target audience is beginner to intermediate users, who should have prior knowledge of Python programming with libraries such as pandas and NumPy. In addition, they should have a basic understanding of recommender systems, decision trees and feed-forward neural networks.
### Requirements
* RAPIDS cuDF 0.15
* NVTabular 0.2
* PyTorch
### Structure
* The notebooks contain theory and exercises
* /solutions/ contains solutions for the exercises
* /data/ is the path with the expected parquet files
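### Getting started
The notebooks expect the preprocessed parquet files in `./data/` (notebook `00_0_Initial.ipynb` builds `train.parquet`, `valid.parquet` and `test.parquet` from the raw csv files). As a rough sketch, each notebook then starts by loading the data with cuDF:
```python
import cudf

df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')
```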
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/05_1_TimeSeries_HistoricalEvents.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import numpy as np
import cudf
import cupy
np.random.seed(42)itemid = [1000001]*10 + [1000002]*5 + [1000001]*5 + [1000002]*5 + [1000001]*1 + [1000002]*1 + [1000001]*2 + [1000002]*2
itemid += [1000001]*3 + [1000002]*2 + [1000001]*1 + [1000002]*1 + [1000001]*6 + [1000002]*3 + [1000001]*2 + [1000002]*2
userid = np.random.choice(list(range(10000)), len(itemid))
action = np.random.choice(list(range(2)), len(itemid), p=[0.2, 0.8])
timestamp = [pd.to_datetime('2020-01-01')]*15
timestamp += [pd.to_datetime('2020-01-02')]*10
timestamp += [pd.to_datetime('2020-01-03')]*2
timestamp += [pd.to_datetime('2020-01-04')]*4
timestamp += [pd.to_datetime('2020-01-05')]*5
timestamp += [pd.to_datetime('2020-01-07')]*2
timestamp += [pd.to_datetime('2020-01-08')]*9
timestamp += [pd.to_datetime('2020-01-09')]*4
data = pd.DataFrame({
'itemid': itemid,
'userid': userid,
'action': action,
'timestamp': timestamp
})data = cudf.from_pandas(data)data[data['itemid']==1000001]data_window = data[['itemid', 'timestamp', 'action']].groupby(['itemid', 'timestamp']).agg(['count', 'sum']).reset_index()
data_window.columns = ['itemid', 'timestamp', 'count', 'sum']
data_window.index = data_window['timestamp']data_windowoffset = '3D'
data_window_roll = data_window[['itemid', 'count', 'sum']].groupby(['itemid']).rolling(offset).sum().drop('itemid', axis=1)
data_window_rolldata_window_roll = data_window_roll.reset_index()
data_window_roll.columns = ['itemid', 'timestamp', 'count_' + offset, 'sum_' + offset]
data_window_roll[['count_' + offset, 'sum_' + offset]] = data_window_roll[['count_' + offset, 'sum_' + offset]].shift(1)
data_window_roll.loc[data_window_roll['itemid']!=data_window_roll['itemid'].shift(1), ['count_' + offset, 'sum_' + offset]] = 0
data_window_roll['avg_' + offset] = data_window_roll['sum_' + offset]/data_window_roll['count_' + offset]data_window_rolldata = data.merge(data_window_roll, how='left', on=['itemid', 'timestamp'])dataoffset = '7D'
data_window_roll = data_window[['itemid', 'count', 'sum']].groupby(['itemid']).rolling(offset).sum().drop('itemid', axis=1)
data_window_roll = data_window_roll.reset_index()
data_window_roll.columns = ['itemid', 'timestamp', 'count_' + offset, 'sum_' + offset]
data_window_roll[['count_' + offset, 'sum_' + offset]] = data_window_roll[['count_' + offset, 'sum_' + offset]].shift(1)
data_window_roll.loc[data_window_roll['itemid']!=data_window_roll['itemid'].shift(1), ['count_' + offset, 'sum_' + offset]] = 0
data_window_roll['avg_' + offset] = data_window_roll['sum_' + offset]/data_window_roll['count_' + offset]
data = data.merge(data_window_roll, how='left', on=['itemid', 'timestamp'])
data### loading
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_test['cat_0'] = df_test['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_test['cat_1'] = df_test['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')df_train['date'] = cudf.from_pandas(pd.to_datetime(df_train['timestamp'].to_pandas()).dt.date)def rolling_window(df, col, offset):
data_window = df[[col, 'date', 'target']].groupby([col, 'date']).agg(['count', 'sum']).reset_index()
data_window.columns = [col, 'date', 'count', 'sum']
data_window.index = data_window['date']
data_window_roll = data_window[[col, 'count', 'sum']].groupby([col]).rolling(offset).sum().drop(col, axis=1)
data_window_roll = data_window_roll.reset_index()
data_window_roll.columns = [col, 'date', 'count_' + offset, 'sum_' + offset]
data_window_roll[['count_' + offset, 'sum_' + offset]] = data_window_roll[['count_' + offset, 'sum_' + offset]].shift(1)
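    # shift by one row so the rolling window only covers days strictly before the current one
    # (no leakage of the current day's target); the .loc below resets the value at category boundaries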
data_window_roll.loc[data_window_roll[col]!=data_window_roll[col].shift(1), ['count_' + offset, 'sum_' + offset]] = 0
data_window_roll['avg_' + offset] = data_window_roll['sum_' + offset]/data_window_roll['count_' + offset]
data = df.merge(data_window_roll, how='left', on=[col, 'date'])
return(data)df_train_pd = df_train.to_pandas()%%time
_ = rolling_window(df_train_pd, 'product_id', '5D')%%time
_ = rolling_window(df_train, 'product_id', '5D')app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/06_2_Intro_NVTabular_XGBoost.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport nvtabular as nvt
from nvtabular import opsimport glob
train_paths = glob.glob('./data/train.parquet')
valid_paths = glob.glob('./data/valid.parquet')
train_dataset = nvt.Dataset(train_paths, engine='parquet', part_mem_fraction=0.15)
valid_dataset = nvt.Dataset(valid_paths, engine='parquet', part_mem_fraction=0.15)train_paths, valid_pathsproc = nvt.Workflow(
cat_names=['product_id', 'brand', 'user_id',
'user_session', 'cat_0', 'cat_1', 'cat_2', 'cat_3',
'ts_hour', 'ts_minute', 'ts_weekday', 'ts_day', 'ts_month', 'ts_year'],
cont_names=['price', 'timestamp'],
label_name=['target']
)proc.add_feature([
ops.LambdaOp(
op_name = 'user_id',
f = lambda col, gdf: col.astype(str) + '_' + gdf['user_id'].astype(str),
columns = ['product_id', 'brand', 'ts_hour', 'ts_minute'],
replace=False
),
ops.LambdaOp(
op_name = 'user_id_brand',
f = lambda col, gdf: col.astype(str) + '_' + gdf['user_id'].astype(str) + '_' + gdf['brand'].astype(str),
columns = ['ts_hour', 'ts_weekday', 'cat_0', 'cat_1', 'cat_2'],
replace=False
),
ops.Categorify(
freq_threshold=15,
columns = [x + '_user_id' for x in ['product_id', 'brand', 'ts_hour', 'ts_minute']] + [x + '_user_id_brand' for x in ['ts_hour', 'ts_weekday', 'cat_0', 'cat_1', 'cat_2']] + ['product_id', 'brand', 'user_id', 'user_session', 'cat_0', 'cat_1', 'cat_2', 'cat_3', 'ts_hour', 'ts_minute', 'ts_weekday', 'ts_day', 'ts_month', 'ts_year']
),
ops.LambdaOp(
op_name = 'product_id',
f = lambda col, gdf: col.astype(str) + '_' + gdf['product_id'].astype(str),
columns = ['brand', 'user_id', 'cat_0'],
replace=False
),
ops.JoinGroupby(
cont_names=[]
),
ops.TargetEncoding(
cat_groups = ['brand', 'user_id', 'product_id', 'cat_2', ['ts_weekday','ts_day']],
cont_target= 'target',
kfold=5,
fold_seed=42,
p_smooth=20,
)
])import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)import cudf
import pandas as pd
import glob
train_paths = glob.glob('./output_nvt_train/*.parquet')
valid_paths = glob.glob('./output_nvt_valid/*.parquet')train = cudf.concat([cudf.read_parquet(x) for x in train_paths])
valid = cudf.concat([cudf.read_parquet(x) for x in valid_paths])
train.drop(['user_session', 'brand_product_id', 'user_id_product_id', 'cat_0_product_id'], axis=1, inplace=True)
valid.drop(['user_session', 'brand_product_id', 'user_id_product_id', 'cat_0_product_id'], axis=1, inplace=True)
import cupy
# TARGET ENCODE WITH KFOLD
def target_encode2(train, valid, col, target='target', kfold=5, smooth=20, verbose=True):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)+'_'+str(smooth)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return (train, valid, 'TE_'+col_name)
def group_binning(df, valid, q_list = [0.1, 0.25, 0.5, 0.75, 0.9]):
df['price_bin'] = -1
valid['price_bin'] = -1
for i, q_value in enumerate(q_list):
print(q_value)
q = df[['cat_012', 'price']].groupby(['cat_012']).quantile(q_value)
q = q.reset_index()
q.columns = ['cat_012', 'price' + str(q_value)]
df = df.merge(q, how='left', on='cat_012')
valid = valid.merge(q, how='left', on='cat_012')
if i == 0:
df.loc[df['price']<=df['price' + str(q_value)], 'price_bin'] = i
valid.loc[valid['price']<=valid['price' + str(q_value)], 'price_bin'] = i
else:
df.loc[(df['price']>df['price' + str(q_list[i-1])]) & (df['price']<=df['price' + str(q_value)]), 'price_bin'] = i
valid.loc[(valid['price']>valid['price' + str(q_list[i-1])]) & (valid['price']<=valid['price' + str(q_value)]), 'price_bin'] = i
if i>=2:
df.drop(['price' + str(q_list[i-2])], axis=1, inplace=True)
valid.drop(['price' + str(q_list[i-2])], axis=1, inplace=True)
df.loc[df['price']>df['price' + str(q_value)], 'price_bin'] = i+1
df.drop(['price' + str(q_list[i-1])], axis=1, inplace=True)
df.drop(['price' + str(q_list[i])], axis=1, inplace=True)
valid.loc[valid['price']>valid['price' + str(q_value)], 'price_bin'] = i+1
valid.drop(['price' + str(q_list[i-1])], axis=1, inplace=True)
valid.drop(['price' + str(q_list[i])], axis=1, inplace=True)
def rolling_window(train, valid, col, offset):
df = cudf.concat([train, valid])
data_window = df[[col, 'date', 'target']].groupby([col, 'date']).agg(['count', 'sum']).reset_index()
data_window.columns = [col, 'date', 'count', 'sum']
data_window.index = data_window['date']
data_window_roll = data_window[[col, 'count', 'sum']].groupby([col]).rolling(offset).sum().drop(col, axis=1)
data_window_roll = data_window_roll.reset_index()
data_window_roll.columns = [col, 'date', col + '_count_' + offset, col + '_sum_' + offset]
data_window_roll[[col + '_count_' + offset, col + '_sum_' + offset]] = data_window_roll[[col + '_count_' + offset, col + '_sum_' + offset]].shift(1)
data_window_roll.loc[data_window_roll[col]!=data_window_roll[col].shift(1), [col + '_count_' + offset, col + '_sum_' + offset]] = 0
data_window_roll[col + '_avg_' + offset] = (data_window_roll[col + '_sum_' + offset]/data_window_roll[col + '_count_' + offset]).fillna(-1)
df = df.merge(data_window_roll, how='left', on=[col, 'date'])
train = df[df['ts_month']!=3]
valid = df[df['ts_month']==3]
return(train, valid)train['cat_012'] = train['cat_0'].astype(str) + '_' + train['cat_1'].astype(str) + '_' + train['cat_2'].astype(str)
valid['cat_012'] = valid['cat_0'].astype(str) + '_' + valid['cat_1'].astype(str) + '_' + valid['cat_2'].astype(str)group_binning(train, valid)
train, valid, name = target_encode2(train, valid, ['price_bin'], 'target', smooth=20)train['date'] = cudf.from_pandas(pd.to_datetime(train['timestamp'].to_pandas()).dt.date)
valid['date'] = cudf.from_pandas(pd.to_datetime(valid['timestamp'].to_pandas()).dt.date)train.columnstrain['product_user'] = train['product_id'].astype(str) + '_' + train['user_id'].astype(str) + '_' + train['cat_2'].astype(str)
valid['product_user'] = valid['product_id'].astype(str) + '_' + valid['user_id'].astype(str) + '_' + valid['cat_2'].astype(str)
# LABEL ENCODE CATEGORIES
comb = cudf.concat([train,valid],ignore_index=True)
for c in ['product_user']:
tmp,code = comb[c].factorize()
train[c] = tmp[:len(train)].values
    valid[c] = tmp[len(train):].values
train.columns
train.drop(['timestamp', 'cat_012', 'price_bin', 'date'], axis=1, inplace=True)
valid.drop(['timestamp', 'cat_012', 'price_bin', 'date'], axis=1, inplace=True)
#train, valid = rolling_window(train, valid, 'product_user', '1D')
#train, valid = rolling_window(train, valid, 'product_user', '7D')
#train, valid = rolling_window(train, valid, 'product_user', '14D')train.to_parquet('train_fe.parquet')
valid.to_parquet('valid_fe.parquet')import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)import cudftrain = cudf.read_parquet('train_fe.parquet')
valid = cudf.read_parquet('valid_fe.parquet')train.columnsfeatures = [
'price',
'product_id',
'brand',
'user_id',
'cat_0',
'cat_1',
'cat_2',
'cat_3',
'ts_hour',
'ts_minute',
'ts_weekday',
'ts_day',
'ts_month',
'ts_year',
'product_id_user_id',
'brand_user_id',
'ts_hour_user_id',
'ts_minute_user_id',
'ts_hour_user_id_brand',
'ts_weekday_user_id_brand',
'cat_0_user_id_brand',
'cat_1_user_id_brand',
'cat_2_user_id_brand',
'brand_product_id_count',
'user_id_product_id_count',
'cat_0_product_id_count',
'TE_brand_target',
'TE_user_id_target',
'TE_product_id_target',
'TE_cat_2_target',
'TE_ts_weekday_ts_day_target',
'TE_price_bin_20'
]xgb_parms = {
'max_depth':12,
'learning_rate':0.02,
'subsample':0.4,
'colsample_bytree':0.4,
#'eval_metric':'logloss',
'eval_metric':'auc',
'objective':'binary:logistic',
'tree_method':'gpu_hist',
'seed': 123
}
import xgboost as xgb
NROUND = 1000
ESR = 50
VERBOSE_EVAL = 25
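# The DMatrix objects below hold the data in XGBoost's internal format; with 'tree_method': 'gpu_hist'
# in xgb_parms, training runs on the GPU. Early stopping monitors the last entry in `evals`
# (the validation AUC) and stops after ESR rounds without improvement.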
dtrain = xgb.DMatrix(data=train[features],label=train.target)
dvalid = xgb.DMatrix(data=valid[features],label=valid.target)
model = xgb.train(xgb_parms,
dtrain=dtrain,
evals=[(dtrain,'train'),(dvalid,'valid')],
num_boost_round=NROUND,
early_stopping_rounds=ESR,
verbose_eval=VERBOSE_EVAL) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/05_2_TimeSeries_Differences.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import numpy as np
import cudf
import cupy
np.random.seed(42)itemid = [1000001]*10 + [1000002]*5 + [1000001]*5 + [1000002]*5 + [1000001]*1 + [1000002]*1 + [1000001]*2 + [1000002]*2
itemid += [1000001]*3 + [1000002]*2 + [1000001]*1 + [1000002]*1 + [1000001]*6 + [1000002]*3 + [1000001]*2 + [1000002]*2
userid = np.random.choice(list(range(10000)), len(itemid))
action = np.random.choice(list(range(2)), len(itemid), p=[0.2, 0.8])
price = [100.00]*10 + [25.00]*5 + [100.00]*5 + [30.00]*5 + [125.00]*1 + [30.00]*1 + [125.00]*2 + [30.00]*2
price += [110.00]*3 + [30.00]*2 + [110.00]*1 + [20.00]*1 + [90.00]*6 + [20.00]*3 + [90.00]*2 + [20.00]*2
timestamp = [pd.to_datetime('2020-01-01')]*15
timestamp += [pd.to_datetime('2020-01-02')]*10
timestamp += [pd.to_datetime('2020-01-03')]*2
timestamp += [pd.to_datetime('2020-01-04')]*4
timestamp += [pd.to_datetime('2020-01-05')]*5
timestamp += [pd.to_datetime('2020-01-07')]*2
timestamp += [pd.to_datetime('2020-01-08')]*9
timestamp += [pd.to_datetime('2020-01-09')]*4
data = pd.DataFrame({
'itemid': itemid,
'userid': userid,
'price': price,
'action': action,
'timestamp': timestamp
})
data = cudf.from_pandas(data)data[data['itemid']==1000001].head(10)offset = 1
data_shift = data[['itemid', 'timestamp', 'price']].groupby(['itemid', 'timestamp']).mean().reset_index()
data_shift.columns = ['itemid', 'timestamp', 'mean']
data_shift['mean_' + str(offset)] = data_shift['mean'].shift(1)
data_shift.loc[data_shift['itemid']!=data_shift['itemid'].shift(1), 'mean_' + str(offset)] = None
data_shift['diff_' + str(offset)] = data_shift['mean'] - data_shift['mean_' + str(offset)]
data_shift.head(10)
data_shift.columns = ['itemid', 'timestamp', 'c1', 'c2', 'price_diff_1']
data_shift.drop(['c1', 'c2'], axis=1, inplace=True)
data_shift.head(10)
data = data.merge(data_shift, how='left', on=['itemid', 'timestamp'])
data.head()
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_test['cat_0'] = df_test['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_test['cat_1'] = df_test['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')df_train['date'] = cudf.from_pandas(pd.to_datetime(df_train['timestamp'].to_pandas()).dt.date)def difference_feature(df, offset):
data_shift = df[['product_id', 'date', 'price']].groupby(['product_id', 'date']).mean().reset_index()
data_shift.columns = ['product_id', 'date', 'mean']
data_shift['mean_' + str(offset)] = data_shift['mean'].shift(offset)
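    # the shifted mean at the first row(s) of each product would otherwise come from the previous
    # product_id, so it is invalidated at product boundaries in the next line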
data_shift.loc[data_shift['product_id']!=data_shift['product_id'].shift(offset), 'mean_' + str(offset)] = None
data_shift['diff_' + str(offset)] = data_shift['mean'] - data_shift['mean_' + str(offset)]
data_shift.columns = ['product_id', 'date', 'c1', 'c2', 'price_diff_' + str(offset)]
data_shift.drop(['c1', 'c2'], axis=1, inplace=True)
df = df.merge(data_shift, how='left', on=['product_id', 'date'])df_train_pd = df_train.to_pandas()%%time
_ = difference_feature(df_train_pd, 1)%%time
_ = difference_feature(df_train, 1)import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/00_0_Initial.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport pandas as pd
import globlist_files = glob.glob('./data/*.csv')def process_files(file):
df_tmp = pd.read_csv(file)
df_tmp['session_purchase'] = df_tmp['user_session'] + '_' + df_tmp['product_id'].astype(str)
df_purchase = df_tmp[df_tmp['event_type']=='purchase']
df_cart = df_tmp[df_tmp['event_type']=='cart']
df_purchase = df_purchase[df_purchase['session_purchase'].isin(df_cart['session_purchase'])]
df_cart = df_cart[~(df_cart['session_purchase'].isin(df_purchase['session_purchase']))]
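    # target definition: purchases that had a preceding cart event in the same session are positives (target=1),
    # cart events that never converted into a purchase are negatives (target=0)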
df_cart['target'] = 0
df_purchase['target'] = 1
df = pd.concat([df_cart, df_purchase])
df = df.drop('category_id', axis=1)
df = df.drop('session_purchase', axis=1)
df[['cat_0', 'cat_1', 'cat_2', 'cat_3']] = df['category_code'].str.split("\.", n = 3, expand = True).fillna('NA')
df['brand'] = df['brand'].fillna('NA')
df = df.drop('category_code', axis=1)
df['timestamp'] = pd.to_datetime(df['event_time'].str.replace(' UTC', ''))
df['ts_hour'] = df['timestamp'].dt.hour
df['ts_minute'] = df['timestamp'].dt.minute
df['ts_weekday'] = df['timestamp'].dt.weekday
df['ts_day'] = df['timestamp'].dt.day
df['ts_month'] = df['timestamp'].dt.month
df['ts_year'] = df['timestamp'].dt.year
df.to_csv('./' + file.replace('../data/', ''), index=False)list_filesfor file in list_files:
print(file)
process_files(file)lp = []
list_files = glob.glob('./*.csv')for file in list_files:
lp.append(pd.read_csv(file))df = pd.concat(lp)df.shapedf_test = df[df['ts_month']==4]
df_valid = df[df['ts_month']==3]
df_train = df[(df['ts_month']!=3)&(df['ts_month']!=4)]df_train.shape, df_valid.shape, df_test.shapedf_train.to_parquet('./data/train.parquet', index=False)df_valid.to_parquet('./data/valid.parquet', index=False)df_test.to_parquet('./data/test.parquet', index=False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/02_1_Preprocessing.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')df_train.isna().sum()df_train[['brand', 'target']].groupby(['brand']).agg(['mean', 'count']).sort_values(('target', 'count'), ascending=False).head(10)cols = ['brand', 'user_session', 'cat_0', 'cat_1', 'cat_2', 'cat_3']
for col in cols:
df_train['NA_' + col] = df_train[col].isna().astype(np.int8)
    df_train[col].fillna('UNKNOWN', inplace=True)
df_train.isna().sum()
df_train[['brand', 'target']].groupby(['brand']).agg(['mean', 'count']).sort_values(('target', 'count'), ascending=False).head(10)
np.random.seed(42)
df_train.loc[np.random.random(df_train.shape[0])<0.01, 'price'] = None
df_train['price'].isna().mean()
df_median = df_train[['cat_2', 'price']].groupby('cat_2').median().reset_index()
df_median.columns = ['cat_2', 'price_median_per_cat2']
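# impute missing prices with the median price of the item's cat_2 category (merged in as
# 'price_median_per_cat2'); the NA_price indicator keeps track of which rows were originally missing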
df_train = df_train.merge(df_median, how='left', on='cat_2')
df_train['NA_price'] = df_train['price'].isna().astype(np.int8)
df_train.loc[df_train['price'].isna(), 'price'] = df_train.loc[df_train['price'].isna(), 'price_median_per_cat2']
df_train.drop('price_median_per_cat2', axis=1, inplace=True)
df_train.head(5)
df_train['price'].isna().mean()
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/04_1_Binning.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_train.head()
df_train[['ts_hour', 'target']].groupby('ts_hour').agg(['count', 'mean']).head(10)
hour = list(range(0,24))
hour_bin = [0]*4 + [1]*4 + [2]*7 + [3]*6 + [4]*3
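# map each hour of the day to one of 5 coarser buckets: hours 0-3 -> 0, 4-7 -> 1, 8-14 -> 2, 15-20 -> 3, 21-23 -> 4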
data = cudf.DataFrame({
'hour': hour,
'hour_bin': hour_bin,
})data.head(10)df_train = df_train.merge(data, how='left', right_on='hour', left_on='ts_hour')df_train[['hour_bin', 'target']].groupby('hour_bin').agg(['count', 'mean'])plt.hist(df_train[df_train['cat_2']=='headphone'].price.to_pandas(), bins=50)
plt.show()
plt.hist(df_train[df_train['cat_1']=='smartphone'].price.to_pandas(), bins=50)
plt.show()
print('Headphones mean price: ' + str(df_train[df_train['cat_2']=='headphone'].price.mean()) + ' median price: ' + str(df_train[df_train['cat_2']=='headphone'].price.median()))
print('Smartphones mean price: ' + str(df_train[df_train['cat_1']=='smartphone'].price.mean()) + ' median price: ' + str(df_train[df_train['cat_1']=='smartphone'].price.median()))
df_train['cat_012'] = df_train['cat_0'].astype(str) + '_' + df_train['cat_1'].astype(str) + '_' + df_train['cat_2'].astype(str)
q_list = [0.1, 0.25, 0.5, 0.75, 0.9]
for q_value in q_list:
q = df_train[['cat_012', 'price']].groupby(['cat_012']).quantile(q_value)
q = q.reset_index()
q.columns = ['cat_012', 'price' + str(q_value)]
    df_train = df_train.merge(q, how='left', on='cat_012')
df_train['price_bin'] = -1
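# (Added comment) The loop below buckets each row's price into per-category quantile bins:
# bucket 0 holds prices at or below the first quantile of its 'cat_012' group, bucket i holds
# prices between the (i-1)-th and i-th quantile, and prices above the last quantile end up in
# bucket len(q_list).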
for i, q_value in enumerate(q_list):
if i == 0:
df_train.loc[df_train['price']<=df_train['price' + str(q_value)], 'price_bin'] = i
else:
df_train.loc[(df_train['price']>df_train['price' + str(q_list[i-1])]) & (df_train['price']<=df_train['price' + str(q_value)]), 'price_bin'] = i
    df_train.loc[df_train['price']>df_train['price' + str(q_value)], 'price_bin'] = i+1
df_train[df_train['price_bin']==3][['price', 'price0.1', 'price0.25', 'price0.5', 'price0.75', 'price0.9', 'price_bin']].drop_duplicates()
df_train = df_train.drop(['price' + str(x) for x in q_list], axis=1)
df_train[['price_bin', 'target']].groupby('price_bin').agg(['count', 'mean'])
### ToDo
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_train['cat_012'] = df_train['cat_0'].astype(str) + '_' + df_train['cat_1'].astype(str) + '_' + df_train['cat_2'].astype(str)
def group_binning(df, q_list = [0.1, 0.25, 0.5, 0.75, 0.9]):
df['price_bin'] = -1
for i, q_value in enumerate(q_list):
print(q_value)
q = df[['cat_012', 'price']].groupby(['cat_012']).quantile(q_value)
q = q.reset_index()
q.columns = ['cat_012', 'price' + str(q_value)]
df = df.merge(q, how='left', on='cat_012')
if i == 0:
df.loc[df['price']<=df['price' + str(q_value)], 'price_bin'] = i
else:
df.loc[(df['price']>df['price' + str(q_list[i-1])]) & (df['price']<=df['price' + str(q_value)]), 'price_bin'] = i
if i>=2:
df.drop(['price' + str(q_list[i-2])], axis=1, inplace=True)
df.loc[df['price']>df['price' + str(q_value)], 'price_bin'] = i+1
df.drop(['price' + str(q_list[i-1])], axis=1, inplace=True)
df.drop(['price' + str(q_list[i])], axis=1, inplace=True)
    return(df)
df_train_pd = df_train.to_pandas()
%%time
df_train_pd = group_binning(df_train_pd)
%%time
df_train = group_binning(df_train)
df_train.head()
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/Dockerfile | ARG dev=false
FROM nvcr.io/nvidia/cuda:10.2-devel-ubuntu18.04 AS base
# install python and cudf
RUN apt-get update && apt-get install -y graphviz git
ADD https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh /miniconda.sh
RUN sh /miniconda.sh -b -p /conda && /conda/bin/conda update -n base conda && /conda/bin/conda create --name nvtabular -c rapidsai -c nvidia -c numba -c conda-forge -c defaults pip cudf=0.15 python=3.7 cudatoolkit=10.2 dask-cudf nodejs>=10.0.0 ipython jupyterlab
ENV PATH=${PATH}:/conda/bin
SHELL ["/bin/bash", "-c"]
RUN source activate nvtabular && pip3 install matplotlib pydotplus scikit-learn torch dask_cuda graphviz xgboost
RUN source activate nvtabular && pip3 install git+https://github.com/NVIDIA/NVTabular.git
# Create working directory to add repo.
WORKDIR /dli
# Load contents into student working directory, excluding anything in .dockerignore
ADD . .
# Set the initial working directory for students.
WORKDIR /dli/task
# Jupyter listens on 8888.
EXPOSE 8888
ENTRYPOINT ["/dli/entrypoint.sh"]
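# Example usage (added note, not part of the original file; image tag and port mapping are illustrative):
#   docker build -t recsys2020-tutorial .
#   docker run --gpus all --rm -it -p 8888:8888 recsys2020-tutorial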
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/04_3_GaussRank.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('./data/train.parquet')
df_valid = cudf.read_parquet('./data/valid.parquet')
df_test = cudf.read_parquet('./data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_test['cat_0'] = df_test['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_test['cat_1'] = df_test['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')def target_encode(train, valid, col, target, kfold=5, smooth=20, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
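        # (Added comment) Smoothed target encoding: a weighted blend of the per-category mean and
        # the out-of-fold global mean; categories with few observations are pulled towards the
        # global mean, which limits overfitting to rare categories.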
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)cats = [['cat_0'], ['cat_1'], ['cat_2'], ['cat_0', 'cat_1', 'cat_2'], ['ts_hour'], ['ts_weekday'], ['ts_weekday', 'ts_hour', 'cat_2', 'brand']]for cat in cats:
df_train, df_valid = target_encode(df_train, df_valid, cat, 'target')cats = ['brand', 'user_id', 'product_id', 'cat_0', 'cat_1', 'cat_2']def count_encode(train, valid, col, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be count encoded (in the example RESOURCE)
"""
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
train_tmp = train[col].value_counts().reset_index()
train_tmp.columns = [col, 'CE_' + col]
df_tmp = train[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
train['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp = valid[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)%%time
for cat in cats:
df_train, df_valid = count_encode(df_train, df_valid, cat, gpu=True)df_train.head()import cupy as cp
from cupyx.scipy.special import erfinv
import cudf as gd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.special import erfinv as sp_erfinvdef gaussrank_cpu(data, epsilon = 1e-6):
r_cpu = data.argsort().argsort()
r_cpu = (r_cpu/r_cpu.max()-0.5)*2 # scale to (-1,1)
r_cpu = np.clip(r_cpu,-1+epsilon,1-epsilon)
r_cpu = sp_erfinv(r_cpu)
return(r_cpu)
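# (Added, hedged) The CPU and GPU implementations should agree up to floating-point error once
# gaussrank_gpu is defined below; a quick self-check on synthetic data could look like this:
# x = np.random.rand(1000)
# assert np.allclose(gaussrank_cpu(x), cp.asnumpy(gaussrank_gpu(cp.asarray(x))), atol=1e-5)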
def gaussrank_gpu(data, epsilon = 1e-6):
r_gpu = data.argsort().argsort()
r_gpu = (r_gpu/r_gpu.max()-0.5)*2 # scale to (-1,1)
r_gpu = cp.clip(r_gpu,-1+epsilon,1-epsilon)
r_gpu = erfinv(r_gpu)
    return(r_gpu)
fig, axs = plt.subplots(1, 2, figsize=(16,3))
col = 'CE_product_id'
data_sample = df_train[col].sample(frac=0.01)
axs[0].hist(data_sample.to_pandas().values, bins=50)
axs[1].hist(cp.asnumpy(gaussrank_gpu(df_train[col].values)), bins=50)
axs[0].set_title('Histogram non-normalized')
axs[1].set_title('Histogram Gauss Rank')
data_cpu = df_train['TE_ts_weekday_ts_hour_cat_2_brand'].to_pandas().values
data_gpu = df_train['TE_ts_weekday_ts_hour_cat_2_brand'].values
%%time
gaussrank_cpu(data_cpu)
%%time
gaussrank_gpu(data_gpu)
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/03_1_CombineCategories.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import cudf
import pandas as pd
import numpy as np
f1 = [0]*45 + [1]*45 + [2]*10 + [0]*5 + [1]*5 + [2]*90 + [0]*5 + [1]*5 + [2]*90 + [0]*45 + [1]*45 + [2]*10
f2 = [0]*45 + [0]*45 + [0]*10 + [1]*5 + [1]*5 + [1]*90 + [0]*5 + [0]*5 + [0]*90 + [1]*45 + [1]*45 + [1]*10
t = [1]*45 + [1]*45 + [1]*10 + [1]*5 + [1]*5 + [1]*90 + [0]*5 + [0]*5 + [0]*90 + [0]*45 + [0]*45 + [0]*10
data = cudf.DataFrame({
'f1': f1,
'f2': f2,
})
for i in range(3,5):
data['f' + str(i)] = np.random.choice(list(range(3)), data.shape[0])
data['target'] = t
data.head()
data.groupby('f1').target.agg(['mean', 'count'])
data.groupby('f2').target.agg(['mean', 'count'])
data.groupby(['f1', 'f2']).target.agg(['mean', 'count'])
df = data.to_pandas()
import pydotplus
import sklearn.tree as tree
from IPython.display import Image
def get_hotn_features(df):
out = []
for col in df.columns:
if col != 'target':
out.append(pd.get_dummies(df[col], prefix=col))
return(pd.concat(out, axis=1))
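# (Added comment) get_hotn_features one-hot encodes every non-target column (one indicator
# column per category level) so the toy frame can be fed to sklearn's DecisionTreeClassifier below.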
def viz_tree(df, lf):
dt_feature_names = list(get_hotn_features(df).columns)
dt_target_names = 'target'
tree.export_graphviz(lf, out_file='tree.dot',
feature_names=dt_feature_names, class_names=dt_target_names,
filled=True)
graph = pydotplus.graph_from_dot_file('tree.dot')
    return(graph.create_png())
lf = tree.DecisionTreeClassifier(max_depth=2)
lf.fit(get_hotn_features(df), df[['target']])
Image(viz_tree(df, lf))
df['f1_f2'] = df['f1'].astype(str) + df['f2'].astype(str)
lf.fit(get_hotn_features(df), df[['target']])
Image(viz_tree(df, lf))
df.groupby([x for x in df.columns if 'target' not in x and 'f1_f2' not in x]).target.agg(['mean', 'count']).head(10)
df.astype(str).describe()
import cudf
df_train = cudf.read_parquet('./data/train.parquet')
df_train.head()
###ToDo
def explore_cat(df, cats):
    df_agg = df[cats + ['target']].groupby(cats).agg(['mean', 'count']).reset_index()
df_agg.columns = cats + ['mean', 'count']
print(df_agg.sort_values('count', ascending=False).head(20))
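# (Added, hedged) One possible way to inspect a combined category with the helper above; this is
# only an illustration, not necessarily the intended solution of the ToDo:
# explore_cat(df_train, ['cat_0', 'cat_1', 'cat_2'])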
cats = ['product_id', 'user_id']
explore_cat(df_train, cats)
############### Solution ###############
############### Solution End ###########
big_df = df_train.to_pandas()
big_data = df_train
print('Pandas Shape:' + str(big_df.shape))
print('cudf Shape:' + str(big_data.shape))
%%time
big_df.groupby(['cat_0', 'cat_1', 'cat_2', 'cat_3', 'brand']).target.agg(['mean', 'count'])
print('')
%%time
big_data.groupby(['cat_0', 'cat_1', 'cat_2', 'cat_3', 'brand']).target.agg(['mean', 'count'])
print('')
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/LICENSE | The MIT License (MIT)
Copyright (c) 2020, NVIDIA CORPORATION.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/04_2_Normalization.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_test = cudf.read_parquet('../data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_test['cat_0'] = df_test['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_test['cat_1'] = df_test['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')def target_encode(train, valid, col, target, kfold=5, smooth=20, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)df_train.columnscats = [['cat_0'], ['cat_1'], ['cat_2'], ['cat_0', 'cat_1', 'cat_2'], ['ts_hour'], ['ts_weekday'], ['ts_weekday', 'ts_hour', 'cat_2', 'brand']]for cat in cats:
df_train, df_valid = target_encode(df_train, df_valid, cat, 'target')cats = ['brand', 'user_id', 'product_id', 'cat_0', 'cat_1', 'cat_2']def count_encode(train, valid, col, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be count encoded (in the example RESOURCE)
"""
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
train_tmp = train[col].value_counts().reset_index()
train_tmp.columns = [col, 'CE_' + col]
df_tmp = train[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
train['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp = valid[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)%%time
for cat in cats:
df_train, df_valid = count_encode(df_train, df_valid, cat, gpu=True)df_train.head()df_train.columnsX = df_train['CE_product_id']X_norm = (X-X.mean())/X.std()fig, axs = plt.subplots(1, 2, figsize=(16,3))
axs[0].hist(X.sample(frac=0.01).to_pandas(), bins=50)
axs[0].set_title('Histogram non-normalised')
axs[1].hist(X_norm.sample(frac=0.01).to_pandas(), bins=50)
axs[1].set_title('Histogram normalised')X = df_train['CE_product_id'].to_pandas()X_log = np.log(X+1)X_norm = (X_log-X_log.mean())/X_log.std()fig, axs = plt.subplots(1, 2, figsize=(16,3))
axs[0].hist(X.sample(frac=0.01), bins=50)
axs[0].set_title('Histogram non-normalised')
axs[1].hist(X_norm.sample(frac=0.01), bins=50)
axs[1].set_title('Histogram normalised')X = df_train['TE_cat_2']plt.hist(((X-X.min())/(X.max()-X.min())).sample(frac=0.01).to_pandas(), bins=50)### ToDo############### Solution ###############
cat = 'price'
X = df_train[cat]
X_norm = (X-X.mean())/X.std()
X_log = np.log(X.to_pandas()+1)
X_log_norm = (X_log-X_log.mean())/X_log.std()
X_minmax = ((X-X.min())/(X.max()-X.min()))
fig, axs = plt.subplots(1, 4, figsize=(16,3))
axs[0].hist(X.sample(frac=0.01).to_pandas(), bins=50)
axs[0].set_title('Histogram non-normalised')
axs[1].hist(X_norm.sample(frac=0.01).to_pandas(), bins=50)
axs[1].set_title('Histogram normalised')
axs[2].hist(X_log_norm.sample(frac=0.01), bins=50)
axs[2].set_title('Histogram log-normalised')
axs[3].hist(X_minmax.sample(frac=0.01).to_pandas(), bins=50)
axs[3].set_title('Histogram minmax')cat = 'TE_ts_weekday_ts_hour_cat_2_brand'
X = df_train[cat]
X_norm = (X-X.mean())/X.std()
X_log = np.log(X.to_pandas()+1)
X_log_norm = (X_log-X_log.mean())/X_log.std()
X_minmax = ((X-X.min())/(X.max()-X.min()))
fig, axs = plt.subplots(1, 4, figsize=(16,3))
axs[0].hist(X.sample(frac=0.01).to_pandas(), bins=50)
axs[0].set_title('Histogram non-normalised')
axs[1].hist(X_norm.sample(frac=0.01).to_pandas(), bins=50)
axs[1].set_title('Histogram normalised')
axs[2].hist(X_log_norm.sample(frac=0.01), bins=50)
axs[2].set_title('Histogram log-normalised')
axs[3].hist(X_minmax.sample(frac=0.01).to_pandas(), bins=50)
axs[3].set_title('Histogram minmax')cat = 'CE_cat_2'
X = df_train[cat]
X_norm = (X-X.mean())/X.std()
X_log = np.log(X.to_pandas()+1)
X_log_norm = (X_log-X_log.mean())/X_log.std()
X_minmax = ((X-X.min())/(X.max()-X.min()))
fig, axs = plt.subplots(1, 4, figsize=(16,3))
axs[0].hist(X.sample(frac=0.01).to_pandas(), bins=50)
axs[0].set_title('Histogram non-normalised')
axs[1].hist(X_norm.sample(frac=0.01).to_pandas(), bins=50)
axs[1].set_title('Histogram normalised')
axs[2].hist(X_log_norm.sample(frac=0.01), bins=50)
axs[2].set_title('Histogram log-normalised')
axs[3].hist(X_minmax.sample(frac=0.01).to_pandas(), bins=50)
axs[3].set_title('Histogram minmax')############### Solution End ###########app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/01_1_Exploring_DataSet.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport cudfimport warnings
warnings.filterwarnings("ignore")import IPython
import cudf
import pandas as pd
import matplotlib.pyplot as pltdf_train = pd.read_parquet('../data/train.parquet')
df_valid = pd.read_parquet('../data/valid.parquet')
df_test = pd.read_parquet('../data/test.parquet')df_train.shape, df_valid.shape, df_test.shapedf = pd.concat([df_train, df_valid, df_test],ignore_index=True)df.shapedf['timestamp'] = pd.to_datetime(df['timestamp'])df.head()df.target.mean()df['event_type'].value_counts(normalize=True)print('# of datapoints:' + str(df.shape))
print('# of unique users:' + str(df['user_id'].drop_duplicates().shape))
print('# of unique products:' + str(df['product_id'].drop_duplicates().shape))
print('# of unique sessions:' + str(df['user_session'].drop_duplicates().shape))def plot_sparse(df, col):
stats = df[[col, 'target']].groupby(col).agg(['count', 'mean', 'sum'])
stats = stats.reset_index()
stats.columns = [col, 'count', 'mean', 'sum']
stats_sort = stats['count'].value_counts().reset_index()
stats_sort = stats_sort.sort_values('index')
plt.figure(figsize=(15,4))
plt.bar(stats_sort['index'].astype(str).values[0:20], stats_sort['count'].values[0:20])
plt.title('Frequency of ' + str(col))
plt.xlabel('Number frequency')
plt.ylabel('Frequency')plot_sparse(df, 'product_id')plot_sparse(df, 'user_id')plot_sparse(df, 'brand')plot_sparse(df, 'cat_0')plot_sparse(df, 'cat_1')plot_sparse(df, 'cat_2')def plot_top20(df, col):
stats = df[[col, 'target']].groupby(col).agg(['count', 'mean', 'sum'])
stats = stats.reset_index()
stats.columns = [col, 'count', 'mean', 'sum']
stats = stats.sort_values('count', ascending=False)
fig, ax1 = plt.subplots(figsize=(15,4))
ax2 = ax1.twinx()
ax1.bar(stats[col].astype(str).values[0:20], stats['count'].values[0:20])
ax1.set_xticklabels(stats[col].astype(str).values[0:20], rotation='vertical')
ax2.plot(stats['mean'].values[0:20], color='red')
ax2.set_ylim(0,1)
ax2.set_ylabel('Mean Target')
ax1.set_ylabel('Frequency')
ax1.set_xlabel(col)
ax1.set_title('Top20 ' + col + 's based on frequency')plot_top20(df, 'product_id')plot_top20(df, 'user_id')plot_top20(df, 'brand')plot_top20(df, 'cat_0')plot_top20(df, 'cat_1')plot_top20(df, 'cat_2')df['date'] = pd.to_datetime(df['timestamp']).dt.dateplt.figure(figsize=(15,4))
plt.plot(df[['date', 'target']].groupby('date').target.mean())
plt.ylabel('average mean')
plt.xlabel('date')
plt.xticks(df[['date', 'target']].groupby('date').target.mean().index[::3], rotation='vertical')
print('')df[['date', 'target']].groupby('date').target.mean().sort_values().head(20)app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/06_1_Intro_Dask.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport dask
from dask.distributed import Client, LocalCluster
import dask.dataframe as ddclient = Client(n_workers=8,
threads_per_worker=1,
memory_limit='50GB',
ip='127.0.0.1')
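# (Added comment) This starts a local CPU-only Dask cluster: 8 single-threaded workers, each
# capped at 50GB of RAM; displaying the client object below shows the dashboard link.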
client%%time
ddf_train = dd.read_parquet('../data/train.parquet', blocksize=12e3)
ddf_valid = dd.read_parquet('../data/valid.parquet', blocksize=12e3)ddf_trainddf_train._meta%%time
ddf_train['cat_2_brand'] = ddf_train['cat_2'].astype(str) + '_' + ddf_train['brand'].astype(str)
ddf_valid['cat_2_brand'] = ddf_valid['cat_2'].astype(str) + '_' + ddf_valid['brand'].astype(str)
ddf_train_group = ddf_train[['cat_2_brand', 'target']].groupby(['cat_2_brand']).agg(['count', 'mean'])
ddf_train_group = ddf_train_group.reset_index()
ddf_train_group.columns = ['cat_2_brand', 'TE_count', 'TE_mean']
ddf_train = ddf_train.merge(ddf_train_group, how='left', on='cat_2_brand')
ddf_valid = ddf_valid.merge(ddf_train_group, how='left', on='cat_2_brand')
global_mean = ddf_train['target'].mean()
ddf_train['TE_mean'] = ddf_train.TE_mean.where(ddf_train['TE_count']>20, global_mean)
ddf_valid['TE_mean'] = ddf_valid.TE_mean.where(ddf_valid['TE_count']>20, global_mean)%%time
ddf_train.compute()
ddf_valid.compute()client.close()import dask as dask, dask_cudf
from dask.distributed import Client
from dask_cuda import LocalCUDAClustercluster = LocalCUDACluster(ip='127.0.0.1',
rmm_pool_size="16GB")
client = Client(cluster)
client%%time
ddf_train = dask_cudf.read_parquet('../data/train.parquet')
ddf_valid = dask_cudf.read_parquet('../data/valid.parquet')%%time
ddf_train['cat_2_brand'] = ddf_train['cat_2'].astype(str) + '_' + ddf_train['brand'].astype(str)
ddf_valid['cat_2_brand'] = ddf_valid['cat_2'].astype(str) + '_' + ddf_valid['brand'].astype(str)
ddf_train_group = ddf_train[['cat_2_brand', 'target']].groupby(['cat_2_brand']).agg(['count', 'mean'])
ddf_train_group = ddf_train_group.reset_index()
ddf_train_group.columns = ['cat_2_brand', 'TE_count', 'TE_mean']
ddf_train = ddf_train.merge(ddf_train_group, how='left', on='cat_2_brand')
ddf_valid = ddf_valid.merge(ddf_train_group, how='left', on='cat_2_brand')
global_mean = ddf_train['target'].mean()
ddf_train['TE_mean'] = ddf_train.TE_mean.where(ddf_train['TE_count']>20, global_mean)
ddf_valid['TE_mean'] = ddf_valid.TE_mean.where(ddf_valid['TE_count']>20, global_mean)%%time
ddf_train.compute()
ddf_valid.compute()client.close() | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/03_4_CountEncoding.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_test = cudf.read_parquet('../data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')df_train.head()cat = 'product_id'ce = df_train[cat].value_counts()cece = ce.reset_index()ce.columns = [cat, 'CE_' + cat]
df_train.merge(ce, how='left', left_on=cat, right_on=cat)ce = df_train[['cat_2', 'brand', 'target']].groupby(['cat_2', 'brand']).agg(['count'])cece = ce.reset_index()
ce.columns = ['cat_2', 'brand', 'CE_cat_2_brand']
df_train.merge(ce, how='left', left_on=['cat_2', 'brand'], right_on=['cat_2', 'brand'])col = 'user_id'### ToDo############### Solution ###############df_train['org_sorting'] = cupy.arange(len(df_train), dtype="int32")
train_tmp = df_train[col].value_counts().reset_index()
train_tmp.columns = [col, 'CE_' + col]
df_tmp = df_train[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_train['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
df_valid['org_sorting'] = cupy.arange(len(df_valid), dtype="int32")
df_tmp = df_valid[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_valid['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
df_valid = df_valid.drop('org_sorting', axis=1)
df_train = df_train.drop('org_sorting', axis=1)############### Solution End ###########app = IPython.Application.instance()
app.kernel.do_shutdown(True)import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')df_train_pd = df_train.to_pandas()
df_valid_pd = df_valid.to_pandas()def count_encode(train, valid, col, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be count encoded (in the example RESOURCE)
"""
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
train_tmp = train[col].value_counts().reset_index()
train_tmp.columns = [col, 'CE_' + col]
df_tmp = train[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
train['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp = valid[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)%%time
df_train_pd, df_valid_pd = count_encode(df_train_pd, df_valid_pd, 'user_id', gpu=False)%%time
df_train, df_valid = count_encode(df_train, df_valid, 'user_id', gpu=True)app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/03_3_TargetEncoding.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_test = cudf.read_parquet('../data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')df_train.head()cat = 'brand'te = df_train[[cat, 'target']].groupby(cat).mean()tete = te.reset_index()
te.columns = [cat, 'TE_' + cat]
df_train.merge(te, how='left', on=cat)te = df_train[['brand', 'cat_2', 'target']].groupby(['brand', 'cat_2']).mean()tete = te.reset_index()
te.columns = ['brand', 'cat_2', 'TE_brand_cat_2']
df_train.merge(te, how='left', left_on=['brand', 'cat_2'], right_on=['brand', 'cat_2'])df_train[[cat, 'target']].groupby(cat).agg(['mean', 'count'])dd = df_train[[cat, 'target']].groupby(cat).agg(['mean', 'count']).reset_index()['target']['count']plt.bar(dd.groupby('count').count().index.to_array(), dd.groupby('count').count().to_array())
plt.xlim(0,50)### ToDo############### Solution ###############
feat = ['brand', 'cat_2']
w = 20
mean_global = df_train.target.mean()
te = df_train.groupby(feat)['target'].agg(['mean','count']).reset_index()
te['TE_brand_cat_2'] = ((te['mean']*te['count'])+(mean_global*w))/(te['count']+w)
df_train = df_train.merge(te, on=feat, how='left')
df_valid = df_valid.merge( te, on=feat, how='left' )
df_test = df_test.merge( te, on=feat, how='left' )
df_valid['TE_brand_cat_2'] = df_valid['TE_brand_cat_2'].fillna(mean_global)
df_test['TE_brand_cat_2'] = df_test['TE_brand_cat_2'].fillna(mean_global)############### Solution End ###########cat = ['ts_weekday', 'ts_hour', 'cat_2', 'brand']
te = df_train.groupby(cat).target.agg(['mean', 'count']).reset_index()
te.columns = cat + ['TE_mean', 'TE_count']df_valid = df_valid.merge(te, on=cat, how='left')
df_valid['error'] = (df_valid['target'] - (df_valid['TE_mean']>=0.5)).abs()mean_global = df_train.target.mean()
df_valid['TE_mean'] = df_valid['TE_mean'].fillna(mean_global)w = 20
df_valid['TE_mean_smoothed'] = ((df_valid['TE_mean']*df_valid['TE_count'])+(mean_global*w))/(df_valid['TE_count']+w)
df_valid['TE_mean_smoothed'] = df_valid['TE_mean_smoothed'].fillna(mean_global)df_valid['error_smoothed'] = (df_valid['target'] - (df_valid['TE_mean_smoothed']>=0.5)).abs()df_valid[['TE_count', 'error']].groupby('TE_count').error.mean()df_valid[['TE_count', 'error_smoothed']].groupby('TE_count').error_smoothed.mean()from sklearn.metrics import roc_auc_scoreroc_auc_score(df_valid['target'].to_pandas().astype(int).values,
df_valid['TE_mean'].to_pandas().values)roc_auc_score(df_valid['target'].to_pandas().astype(int).values,
df_valid['TE_mean_smoothed'].to_pandas().values)app = IPython.Application.instance()
app.kernel.do_shutdown(True)import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_test = cudf.read_parquet('../data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')def target_encode(train, valid, col, target, kfold=5, smooth=20):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)%%time
df_train, df_valid = target_encode(df_train, df_valid, ['ts_weekday', 'ts_hour', 'cat_2', 'brand'], 'target')df_train.head()df_valid.head()app = IPython.Application.instance()
app.kernel.do_shutdown(True)import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')def target_encode(train, valid, col, target, kfold=5, smooth=20, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)df_train_pd = df_train.to_pandas()
df_valid_pd = df_valid.to_pandas()%%time
df_train_pd, df_valid_pd = target_encode(df_train_pd, df_valid_pd, ['ts_weekday', 'ts_hour', 'cat_2', 'brand'], 'target', gpu=False)%%time
df_train, df_valid = target_encode(df_train, df_valid, ['ts_weekday', 'ts_hour', 'cat_2', 'brand'], 'target')app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/03_2_Categorify.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_test = cudf.read_parquet('../data/test.parquet')df_train.head()cat = 'product_id'df_train[cat].unique()codes, uniques = df_train[cat].factorize()codescodes.unique()import hashlib
from sys import getsizeofhashlib.md5(b'0').hexdigest()hashSeries = df_train[cat].to_pandas().apply(lambda x: hashlib.md5(bytes(str(x), encoding='utf-8')).hexdigest())hashSeriesgetsizeof(hashSeries)codes, uniques = hashSeries.factorize()getsizeof(pd.DataFrame(codes)[0])91691016/1020060933df_train[cat].value_counts()freq = df_train[cat].value_counts()freq = freq.reset_index()
freq.columns = [cat, 'count']
freq = freq.reset_index()
freq.columns = [cat + '_Categorify', cat, 'count']
freq_filtered = freq[freq['count']>5]
freq_filtered[cat + '_Categorify'] = freq_filtered[cat + '_Categorify']+1
freq_filtered = freq_filtered.drop('count', axis=1)
df_train = df_train.merge(freq_filtered, how='left', on=cat)
df_train[cat + '_Categorify'] = df_train[cat + '_Categorify'].fillna(0)df_train['product_id_Categorify'].min(), df_train['product_id_Categorify'].max(), df_train['product_id_Categorify'].drop_duplicates().shapedf_valid = df_valid.merge(freq_filtered, how='left', on=cat)
df_valid[cat + '_Categorify'] = df_valid[cat + '_Categorify'].fillna(0)
df_test = df_test.merge(freq_filtered, how='left', on=cat)
df_test[cat + '_Categorify'] = df_test[cat + '_Categorify'].fillna(0)### ToDo############### Solution ###############cat = 'brand'
freq = df_train[cat].value_counts()
freq = freq.reset_index()
freq.columns = [cat, 'count']
freq = freq.reset_index()
freq.columns = [cat + '_Categorify', cat, 'count']
freq[cat + '_Categorify'] = freq[cat + '_Categorify']+2
freq.loc[freq['count']<20, cat + '_Categorify'] = 0
freq = freq.drop('count', axis=1)
df_train = df_train.merge(freq, how='left', on=cat)
df_train[cat + '_Categorify'] = df_train[cat + '_Categorify'].fillna(1)
df_valid = df_valid.merge(freq, how='left', on=cat)
df_valid[cat + '_Categorify'] = df_valid[cat + '_Categorify'].fillna(1)
df_test = df_test.merge(freq, how='left', on=cat)
df_test[cat + '_Categorify'] = df_test[cat + '_Categorify'].fillna(1)(df_train['brand_Categorify']==0).value_counts()(df_test['brand_Categorify']==0).value_counts()(df_test['brand_Categorify']==1).value_counts()############### Solution End ###########def categorify(df_train, df_valid, df_test, cat, freq_treshhold=20, unkown_id=1, lowfrequency_id=0):
freq = df_train[cat].value_counts()
freq = freq.reset_index()
freq.columns = [cat, 'count']
freq = freq.reset_index()
freq.columns = [cat + '_Categorify', cat, 'count']
freq[cat + '_Categorify'] = freq[cat + '_Categorify']+2
freq.loc[freq['count']<freq_treshhold, cat + '_Categorify'] = lowfrequency_id
freq = freq.drop('count', axis=1)
df_train = df_train.merge(freq, how='left', on=cat)
df_train[cat + '_Categorify'] = df_train[cat + '_Categorify'].fillna(unkown_id)
df_valid = df_valid.merge(freq, how='left', on=cat)
df_valid[cat + '_Categorify'] = df_valid[cat + '_Categorify'].fillna(unkown_id)
df_test = df_test.merge(freq, how='left', on=cat)
df_test[cat + '_Categorify'] = df_test[cat + '_Categorify'].fillna(unkown_id)df_train_pd = df_train.to_pandas()
df_valid_pd = df_valid.to_pandas()
df_test_pd = df_test.to_pandas()%%time
categorify(df_train_pd, df_valid_pd, df_test_pd, 'user_id')%%time
categorify(df_train, df_valid, df_test, 'user_id')app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/05_1_TimeSeries_HistoricalEvents.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREimport IPython
import pandas as pd
import numpy as np
import cudf
import cupy
np.random.seed(42)itemid = [1000001]*10 + [1000002]*5 + [1000001]*5 + [1000002]*5 + [1000001]*1 + [1000002]*1 + [1000001]*2 + [1000002]*2
itemid += [1000001]*3 + [1000002]*2 + [1000001]*1 + [1000002]*1 + [1000001]*6 + [1000002]*3 + [1000001]*2 + [1000002]*2
userid = np.random.choice(list(range(10000)), len(itemid))
action = np.random.choice(list(range(2)), len(itemid), p=[0.2, 0.8])
timestamp = [pd.to_datetime('2020-01-01')]*15
timestamp += [pd.to_datetime('2020-01-02')]*10
timestamp += [pd.to_datetime('2020-01-03')]*2
timestamp += [pd.to_datetime('2020-01-04')]*4
timestamp += [pd.to_datetime('2020-01-05')]*5
timestamp += [pd.to_datetime('2020-01-07')]*2
timestamp += [pd.to_datetime('2020-01-08')]*9
timestamp += [pd.to_datetime('2020-01-09')]*4
data = pd.DataFrame({
'itemid': itemid,
'userid': userid,
'action': action,
'timestamp': timestamp
})data = cudf.from_pandas(data)data[data['itemid']==1000001]data_window = data[['itemid', 'timestamp', 'action']].groupby(['itemid', 'timestamp']).agg(['count', 'sum']).reset_index()
data_window.columns = ['itemid', 'timestamp', 'count', 'sum']
data_window.index = data_window['timestamp']data_windowoffset = '3D'
data_window_roll = data_window[['itemid', 'count', 'sum']].groupby(['itemid']).rolling(offset).sum().drop('itemid', axis=1)
data_window_rolldata_window_roll = data_window_roll.reset_index()
data_window_roll.columns = ['itemid', 'timestamp', 'count_' + offset, 'sum_' + offset]
data_window_roll[['count_' + offset, 'sum_' + offset]] = data_window_roll[['count_' + offset, 'sum_' + offset]].shift(1)
data_window_roll.loc[data_window_roll['itemid']!=data_window_roll['itemid'].shift(1), ['count_' + offset, 'sum_' + offset]] = 0
data_window_roll['avg_' + offset] = data_window_roll['sum_' + offset]/data_window_roll['count_' + offset]data_window_rolldata = data.merge(data_window_roll, how='left', on=['itemid', 'timestamp'])dataoffset = '7D'
data_window_roll = data_window[['itemid', 'count', 'sum']].groupby(['itemid']).rolling(offset).sum().drop('itemid', axis=1)
data_window_roll = data_window_roll.reset_index()
data_window_roll.columns = ['itemid', 'timestamp', 'count_' + offset, 'sum_' + offset]
data_window_roll[['count_' + offset, 'sum_' + offset]] = data_window_roll[['count_' + offset, 'sum_' + offset]].shift(1)
data_window_roll.loc[data_window_roll['itemid']!=data_window_roll['itemid'].shift(1), ['count_' + offset, 'sum_' + offset]] = 0
data_window_roll['avg_' + offset] = data_window_roll['sum_' + offset]/data_window_roll['count_' + offset]
data = data.merge(data_window_roll, how='left', on=['itemid', 'timestamp'])
data
### loading
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_test = cudf.read_parquet('../data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_test['cat_0'] = df_test['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_test['cat_1'] = df_test['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')
df_train['date'] = cudf.from_pandas(pd.to_datetime(df_train['timestamp'].to_pandas()).dt.date)
############### Solution ###############
offset = '7D'
data_window = df_train[['product_id', 'date', 'target']].groupby(['product_id', 'date']).agg(['count', 'sum']).reset_index()
data_window.columns = ['product_id', 'date', 'count', 'sum']
data_window.index = data_window['date']
data_window_roll = data_window[['product_id', 'count', 'sum']].groupby(['product_id']).rolling(offset).sum().drop('product_id', axis=1)
data_window_roll = data_window_roll.reset_index()
data_window_roll.columns = ['product_id', 'date', 'count_' + offset, 'sum_' + offset]
data_window_roll[['count_' + offset, 'sum_' + offset]] = data_window_roll[['count_' + offset, 'sum_' + offset]].shift(1)
data_window_roll.loc[data_window_roll['product_id']!=data_window_roll['product_id'].shift(1), ['count_' + offset, 'sum_' + offset]] = 0
data_window_roll['avg_' + offset] = data_window_roll['sum_' + offset]/data_window_roll['count_' + offset]
data = df_train.merge(data_window_roll, how='left', on=['product_id', 'date'])
data
############### Solution End ###########
def rolling_window(df, col, offset):
data_window = df[[col, 'date', 'target']].groupby([col, 'date']).agg(['count', 'sum']).reset_index()
data_window.columns = [col, 'date', 'count', 'sum']
data_window.index = data_window['date']
data_window_roll = data_window[[col, 'count', 'sum']].groupby([col]).rolling(offset).sum().drop(col, axis=1)
data_window_roll = data_window_roll.reset_index()
data_window_roll.columns = [col, 'date', 'count_' + offset, 'sum_' + offset]
data_window_roll[['count_' + offset, 'sum_' + offset]] = data_window_roll[['count_' + offset, 'sum_' + offset]].shift(1)
data_window_roll.loc[data_window_roll[col]!=data_window_roll[col].shift(1), ['count_' + offset, 'sum_' + offset]] = 0
data_window_roll['avg_' + offset] = data_window_roll['sum_' + offset]/data_window_roll['count_' + offset]
data = df.merge(data_window_roll, how='left', on=[col, 'date'])
return(data)
df_train_pd = df_train.to_pandas()
%%time
_ = rolling_window(df_train_pd, 'product_id', '5D')
%%time
_ = rolling_window(df_train, 'product_id', '5D')
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/06_2_Intro_NVTabular_XGBoost.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import nvtabular as nvt
from nvtabular import ops
import glob
train_paths = glob.glob('../data/train.parquet')
valid_paths = glob.glob('../data/valid.parquet')
train_dataset = nvt.Dataset(train_paths, engine='parquet', part_mem_fraction=0.15)
valid_dataset = nvt.Dataset(valid_paths, engine='parquet', part_mem_fraction=0.15)
train_paths, valid_paths
proc = nvt.Workflow(
cat_names=['product_id', 'brand', 'user_id',
'user_session', 'cat_0', 'cat_1', 'cat_2', 'cat_3',
'ts_hour', 'ts_minute', 'ts_weekday', 'ts_day', 'ts_month', 'ts_year'],
cont_names=['price', 'timestamp'],
label_name=['target']
)
proc.add_feature([
ops.LambdaOp(
op_name = 'user_id',
f = lambda col, gdf: col.astype(str) + '_' + gdf['user_id'].astype(str),
columns = ['product_id', 'brand', 'ts_hour', 'ts_minute'],
replace=False
),
ops.LambdaOp(
op_name = 'user_id_brand',
f = lambda col, gdf: col.astype(str) + '_' + gdf['user_id'].astype(str) + '_' + gdf['brand'].astype(str),
columns = ['ts_hour', 'ts_weekday', 'cat_0', 'cat_1', 'cat_2'],
replace=False
),
ops.Categorify(
freq_threshold=15,
columns = [x + '_user_id' for x in ['product_id', 'brand', 'ts_hour', 'ts_minute']] + [x + '_user_id_brand' for x in ['ts_hour', 'ts_weekday', 'cat_0', 'cat_1', 'cat_2']] + ['product_id', 'brand', 'user_id', 'user_session', 'cat_0', 'cat_1', 'cat_2', 'cat_3', 'ts_hour', 'ts_minute', 'ts_weekday', 'ts_day', 'ts_month', 'ts_year']
),
ops.LambdaOp(
op_name = 'product_id',
f = lambda col, gdf: col.astype(str) + '_' + gdf['product_id'].astype(str),
columns = ['brand', 'user_id', 'cat_0'],
replace=False
),
ops.JoinGroupby(
cont_names=[]
),
ops.TargetEncoding(
cat_groups = ['brand', 'user_id', 'product_id', 'cat_2', ['ts_weekday','ts_day']],
cont_target= 'target',
kfold=5,
fold_seed=42,
p_smooth=20,
)
])
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import cudf
import pandas as pd
import glob
train_paths = glob.glob('./output_nvt_train/*.parquet')
valid_paths = glob.glob('./output_nvt_valid/*.parquet')
train = cudf.concat([cudf.read_parquet(x) for x in train_paths])
valid = cudf.concat([cudf.read_parquet(x) for x in valid_paths])
train.drop(['user_session', 'brand_product_id', 'user_id_product_id', 'cat_0_product_id'], inplace=True)
valid.drop(['user_session', 'brand_product_id', 'user_id_product_id', 'cat_0_product_id'], inplace=True)
import cupy
# TARGET ENCODE WITH KFOLD
def target_encode2(train, valid, col, target='target', kfold=5, smooth=20, verbose=True):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)+'_'+str(smooth)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return (train, valid, 'TE_'+col_name)
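# Note (added for clarity): a tiny worked example of the smoothing formula used in
# target_encode2 above, TE = (cat_mean*cat_count + global_mean*smooth) / (cat_count + smooth).
# The numbers are made up for illustration and are not taken from the tutorial data.
cat_mean, cat_count = 0.9, 3        # a category seen only 3 times
global_mean, smooth = 0.5, 20       # the prior pulls rare categories towards the global mean
print((cat_mean*cat_count + global_mean*smooth) / (cat_count + smooth))   # ~0.55, close to the global mean
print((cat_mean*3000 + global_mean*smooth) / (3000 + smooth))             # ~0.90, close to the category mean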
def group_binning(df, valid, q_list = [0.1, 0.25, 0.5, 0.75, 0.9]):
df['price_bin'] = -1
valid['price_bin'] = -1
for i, q_value in enumerate(q_list):
print(q_value)
q = df[['cat_012', 'price']].groupby(['cat_012']).quantile(q_value)
q = q.reset_index()
q.columns = ['cat_012', 'price' + str(q_value)]
df = df.merge(q, how='left', on='cat_012')
valid = valid.merge(q, how='left', on='cat_012')
if i == 0:
df.loc[df['price']<=df['price' + str(q_value)], 'price_bin'] = i
valid.loc[valid['price']<=valid['price' + str(q_value)], 'price_bin'] = i
else:
df.loc[(df['price']>df['price' + str(q_list[i-1])]) & (df['price']<=df['price' + str(q_value)]), 'price_bin'] = i
valid.loc[(valid['price']>valid['price' + str(q_list[i-1])]) & (valid['price']<=valid['price' + str(q_value)]), 'price_bin'] = i
if i>=2:
df.drop(['price' + str(q_list[i-2])], axis=1, inplace=True)
valid.drop(['price' + str(q_list[i-2])], axis=1, inplace=True)
df.loc[df['price']>df['price' + str(q_value)], 'price_bin'] = i+1
df.drop(['price' + str(q_list[i-1])], axis=1, inplace=True)
df.drop(['price' + str(q_list[i])], axis=1, inplace=True)
valid.loc[valid['price']>valid['price' + str(q_value)], 'price_bin'] = i+1
valid.drop(['price' + str(q_list[i-1])], axis=1, inplace=True)
valid.drop(['price' + str(q_list[i])], axis=1, inplace=True)
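# Note (added for clarity): a compact sketch of how the quantile bins in group_binning
# above are assigned. For these values np.searchsorted matches the if/else logic:
# price <= q0.1 -> bin 0, q0.1 < price <= q0.25 -> bin 1, ..., price > q0.9 -> bin 5.
# The quantile values below are hypothetical, purely for illustration.
import numpy as np
_q = [10, 25, 50, 75, 90]                      # quantiles of one cat_012 group
for _p in [8, 30, 120]:
    print(_p, np.searchsorted(_q, _p))         # 8 -> 0, 30 -> 2, 120 -> 5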
def rolling_window(train, valid, col, offset):
df = cudf.concat([train, valid])
data_window = df[[col, 'date', 'target']].groupby([col, 'date']).agg(['count', 'sum']).reset_index()
data_window.columns = [col, 'date', 'count', 'sum']
data_window.index = data_window['date']
data_window_roll = data_window[[col, 'count', 'sum']].groupby([col]).rolling(offset).sum().drop(col, axis=1)
data_window_roll = data_window_roll.reset_index()
data_window_roll.columns = [col, 'date', col + '_count_' + offset, col + '_sum_' + offset]
data_window_roll[[col + '_count_' + offset, col + '_sum_' + offset]] = data_window_roll[[col + '_count_' + offset, col + '_sum_' + offset]].shift(1)
data_window_roll.loc[data_window_roll[col]!=data_window_roll[col].shift(1), [col + '_count_' + offset, col + '_sum_' + offset]] = 0
data_window_roll[col + '_avg_' + offset] = (data_window_roll[col + '_sum_' + offset]/data_window_roll[col + '_count_' + offset]).fillna(-1)
df = df.merge(data_window_roll, how='left', on=[col, 'date'])
train = df[df['ts_month']!=3]
valid = df[df['ts_month']==3]
return(train, valid)
train['cat_012'] = train['cat_0'].astype(str) + '_' + train['cat_1'].astype(str) + '_' + train['cat_2'].astype(str)
valid['cat_012'] = valid['cat_0'].astype(str) + '_' + valid['cat_1'].astype(str) + '_' + valid['cat_2'].astype(str)
group_binning(train, valid)
train, valid, name = target_encode2(train, valid, ['price_bin'], 'target', smooth=20)
train['date'] = cudf.from_pandas(pd.to_datetime(train['timestamp'].to_pandas()).dt.date)
valid['date'] = cudf.from_pandas(pd.to_datetime(valid['timestamp'].to_pandas()).dt.date)
train.columns
train['product_user'] = train['product_id'].astype(str) + '_' + train['user_id'].astype(str) + '_' + train['cat_2'].astype(str)
valid['product_user'] = valid['product_id'].astype(str) + '_' + valid['user_id'].astype(str) + '_' + valid['cat_2'].astype(str)
# LABEL ENCODE CATEGORIES
comb = cudf.concat([train,valid],ignore_index=True)
for c in ['product_user']:
tmp,code = comb[c].factorize()
train[c] = tmp[:len(train)].values
valid[c] = tmp[len(train):].values
train.columns
train.drop(['timestamp', 'cat_012', 'price_bin', 'date'], inplace=True)
valid.drop(['timestamp', 'cat_012', 'price_bin', 'date'], inplace=True)
#train, valid = rolling_window(train, valid, 'product_user', '1D')
#train, valid = rolling_window(train, valid, 'product_user', '7D')
#train, valid = rolling_window(train, valid, 'product_user', '14D')
train.to_parquet('train_fe.parquet')
valid.to_parquet('valid_fe.parquet')
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import cudf
train = cudf.read_parquet('train_fe.parquet')
valid = cudf.read_parquet('valid_fe.parquet')
train.columns
features = [
'price',
'product_id',
'brand',
'user_id',
'cat_0',
'cat_1',
'cat_2',
'cat_3',
'ts_hour',
'ts_minute',
'ts_weekday',
'ts_day',
'ts_month',
'ts_year',
'product_id_user_id',
'brand_user_id',
'ts_hour_user_id',
'ts_minute_user_id',
'ts_hour_user_id_brand',
'ts_weekday_user_id_brand',
'cat_0_user_id_brand',
'cat_1_user_id_brand',
'cat_2_user_id_brand',
'brand_product_id_count',
'user_id_product_id_count',
'cat_0_product_id_count',
'TE_brand_target',
'TE_user_id_target',
'TE_product_id_target',
'TE_cat_2_target',
'TE_ts_weekday_ts_day_target',
'TE_price_bin_20'
]
xgb_parms = {
'max_depth':12,
'learning_rate':0.02,
'subsample':0.4,
'colsample_bytree':0.4,
#'eval_metric':'logloss',
'eval_metric':'auc',
'objective':'binary:logistic',
'tree_method':'gpu_hist',
'seed': 123
}
import xgboost as xgb
NROUND = 1000
ESR = 50
VERBOSE_EVAL = 25
dtrain = xgb.DMatrix(data=train[features],label=train.target)
dvalid = xgb.DMatrix(data=valid[features],label=valid.target)
model = xgb.train(xgb_parms,
dtrain=dtrain,
evals=[(dtrain,'train'),(dvalid,'valid')],
num_boost_round=NROUND,
early_stopping_rounds=ESR,
verbose_eval=VERBOSE_EVAL) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/05_2_TimeSeries_Differences.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import IPython
import pandas as pd
import numpy as np
import cudf
import cupy
np.random.seed(42)
itemid = [1000001]*10 + [1000002]*5 + [1000001]*5 + [1000002]*5 + [1000001]*1 + [1000002]*1 + [1000001]*2 + [1000002]*2
itemid += [1000001]*3 + [1000002]*2 + [1000001]*1 + [1000002]*1 + [1000001]*6 + [1000002]*3 + [1000001]*2 + [1000002]*2
userid = np.random.choice(list(range(10000)), len(itemid))
action = np.random.choice(list(range(2)), len(itemid), p=[0.2, 0.8])
price = [100.00]*10 + [25.00]*5 + [100.00]*5 + [30.00]*5 + [125.00]*1 + [30.00]*1 + [125.00]*2 + [30.00]*2
price += [110.00]*3 + [30.00]*2 + [110.00]*1 + [20.00]*1 + [90.00]*6 + [20.00]*3 + [90.00]*2 + [20.00]*2
timestamp = [pd.to_datetime('2020-01-01')]*15
timestamp += [pd.to_datetime('2020-01-02')]*10
timestamp += [pd.to_datetime('2020-01-03')]*2
timestamp += [pd.to_datetime('2020-01-04')]*4
timestamp += [pd.to_datetime('2020-01-05')]*5
timestamp += [pd.to_datetime('2020-01-07')]*2
timestamp += [pd.to_datetime('2020-01-08')]*9
timestamp += [pd.to_datetime('2020-01-09')]*4
data = pd.DataFrame({
'itemid': itemid,
'userid': userid,
'price': price,
'action': action,
'timestamp': timestamp
})
data = cudf.from_pandas(data)
data[data['itemid']==1000001].head(10)
offset = 1
data_shift = data[['itemid', 'timestamp', 'price']].groupby(['itemid', 'timestamp']).mean().reset_index()
data_shift.columns = ['itemid', 'timestamp', 'mean']
data_shift['mean_' + str(offset)] = data_shift['mean'].shift(1)
data_shift.loc[data_shift['itemid']!=data_shift['itemid'].shift(1), 'mean_' + str(offset)] = None
data_shift['diff_' + str(offset)] = data_shift['mean'] - data_shift['mean_' + str(offset)]
data_shift.head(10)
data_shift.columns = ['itemid', 'timestamp', 'c1', 'c2', 'price_diff_1']
data_shift.drop(['c1', 'c2'], axis=1, inplace=True)
data_shift.head(10)
data = data.merge(data_shift, how='left', on=['itemid', 'timestamp'])
data.head()
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_test = cudf.read_parquet('../data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_test['cat_0'] = df_test['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_test['cat_1'] = df_test['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')
df_train['date'] = cudf.from_pandas(pd.to_datetime(df_train['timestamp'].to_pandas()).dt.date)
############### Solution ###############
offset = 1
data_shift = df_train[['product_id', 'date', 'price']].groupby(['product_id', 'date']).mean().reset_index()
data_shift.columns = ['product_id', 'date', 'mean']
data_shift['mean_' + str(offset)] = data_shift['mean'].shift(1)
data_shift.loc[data_shift['product_id']!=data_shift['product_id'].shift(1), 'mean_' + str(offset)] = None
data_shift['diff_' + str(offset)] = data_shift['mean'] - data_shift['mean_' + str(offset)]
data_shift.columns = ['product_id', 'date', 'c1', 'c2', 'price_diff_1']
data_shift.drop(['c1', 'c2'], axis=1, inplace=True)
df_train = df_train.merge(data_shift, how='left', on=['product_id', 'date'])
############### Solution End ###########
def difference_feature(df, offset):
data_shift = df[['product_id', 'date', 'price']].groupby(['product_id', 'date']).mean().reset_index()
data_shift.columns = ['product_id', 'date', 'mean']
data_shift['mean_' + str(offset)] = data_shift['mean'].shift(offset)
data_shift.loc[data_shift['product_id']!=data_shift['product_id'].shift(offset), 'mean_' + str(offset)] = None
data_shift['diff_' + str(offset)] = data_shift['mean'] - data_shift['mean_' + str(offset)]
data_shift.columns = ['product_id', 'date', 'c1', 'c2', 'price_diff_' + str(offset)]
data_shift.drop(['c1', 'c2'], axis=1, inplace=True)
df = df.merge(data_shift, how='left', on=['product_id', 'date'])
df_train_pd = df_train.to_pandas()
%%time
_ = difference_feature(df_train_pd, 1)
%%time
_ = difference_feature(df_train, 1)
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/00_0_Initial.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import pandas as pd
import glob
list_files = glob.glob('../data/*.csv')
def process_files(file):
df_tmp = pd.read_csv(file)
df_tmp['session_purchase'] = df_tmp['user_session'] + '_' + df_tmp['product_id'].astype(str)
df_purchase = df_tmp[df_tmp['event_type']=='purchase']
df_cart = df_tmp[df_tmp['event_type']=='cart']
df_purchase = df_purchase[df_purchase['session_purchase'].isin(df_cart['session_purchase'])]
df_cart = df_cart[~(df_cart['session_purchase'].isin(df_purchase['session_purchase']))]
df_cart['target'] = 0
df_purchase['target'] = 1
df = pd.concat([df_cart, df_purchase])
df = df.drop('category_id', axis=1)
df = df.drop('session_purchase', axis=1)
df[['cat_0', 'cat_1', 'cat_2', 'cat_3']] = df['category_code'].str.split("\.", n = 3, expand = True).fillna('NA')
df['brand'] = df['brand'].fillna('NA')
df = df.drop('category_code', axis=1)
df['timestamp'] = pd.to_datetime(df['event_time'].str.replace(' UTC', ''))
df['ts_hour'] = df['timestamp'].dt.hour
df['ts_minute'] = df['timestamp'].dt.minute
df['ts_weekday'] = df['timestamp'].dt.weekday
df['ts_day'] = df['timestamp'].dt.day
df['ts_month'] = df['timestamp'].dt.month
df['ts_year'] = df['timestamp'].dt.year
df.to_csv('./' + file.replace('../data/', ''), index=False)
list_files
for file in list_files:
print(file)
process_files(file)
lp = []
list_files = glob.glob('./*.csv')
for file in list_files:
lp.append(pd.read_csv(file))
df = pd.concat(lp)
df.shape
df_test = df[df['ts_month']==4]
df_valid = df[df['ts_month']==3]
df_train = df[(df['ts_month']!=3)&(df['ts_month']!=4)]
df_train.shape, df_valid.shape, df_test.shape
df_train.to_parquet('../data/train.parquet', index=False)
df_valid.to_parquet('../data/valid.parquet', index=False)
df_test.to_parquet('../data/test.parquet', index=False) | 0
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/02_1_Preprocessing.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_test = cudf.read_parquet('../data/test.parquet')
df_train.isna().sum()
df_train[['brand', 'target']].groupby(['brand']).agg(['mean', 'count']).sort_values(('target', 'count'), ascending=False).head(10)
cols = ['brand', 'user_session', 'cat_0', 'cat_1', 'cat_2', 'cat_3']
for col in cols:
df_train['NA_' + col] = df_train[col].isna().astype(np.int8)
df_train[col].fillna('UNKNOWN', inplace=True)
df_train.isna().sum()
df_train[['brand', 'target']].groupby(['brand']).agg(['mean', 'count']).sort_values(('target', 'count'), ascending=False).head(10)
np.random.seed(42)
df_train.loc[np.random.random(df_train.shape[0])<0.01, 'price'] = None
df_train['price'].isna().mean()
df_median = df_train[['cat_2', 'price']].groupby('cat_2').median().reset_index()
df_median.columns = ['cat_2', 'price_median_per_cat2']
df_train = df_train.merge(df_median, how='left', on='cat_2')
df_train['NA_price'] = df_train['price'].isna().astype(np.int8)
df_train.loc[df_train['price'].isna(), 'price'] = df_train.loc[df_train['price'].isna(), 'price_median_per_cat2']
df_train.drop('price_median_per_cat2', axis=1, inplace=True)
df_train.head(5)
df_train['price'].isna().mean()
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/04_1_Binning.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_train.head()
df_train[['ts_hour', 'target']].groupby('ts_hour').agg(['count', 'mean']).head(10)
hour = list(range(0,24))
hour_bin = [0]*4 + [1]*4 + [2]*7 + [3]*6 + [4]*3
data = cudf.DataFrame({
'hour': hour,
'hour_bin': hour_bin,
})
data.head(10)
df_train = df_train.merge(data, how='left', right_on='hour', left_on='ts_hour')
df_train[['hour_bin', 'target']].groupby('hour_bin').agg(['count', 'mean'])
plt.hist(df_train[df_train['cat_2']=='headphone'].price.to_pandas(), bins=50)
plt.show()
plt.hist(df_train[df_train['cat_1']=='smartphone'].price.to_pandas(), bins=50)
plt.show()
print('Headphones mean price: ' + str(df_train[df_train['cat_2']=='headphone'].price.mean()) + ' median price: ' + str(df_train[df_train['cat_2']=='headphone'].price.median()))
print('Smartphones mean price: ' + str(df_train[df_train['cat_1']=='smartphone'].price.mean()) + ' median price: ' + str(df_train[df_train['cat_1']=='smartphone'].price.median()))
df_train['cat_012'] = df_train['cat_0'].astype(str) + '_' + df_train['cat_1'].astype(str) + '_' + df_train['cat_2'].astype(str)
q_list = [0.1, 0.25, 0.5, 0.75, 0.9]
for q_value in q_list:
q = df_train[['cat_012', 'price']].groupby(['cat_012']).quantile(q_value)
q = q.reset_index()
q.columns = ['cat_012', 'price' + str(q_value)]
df_train = df_train.merge(q, how='left', on='cat_012')
df_train['price_bin'] = -1
for i, q_value in enumerate(q_list):
if i == 0:
df_train.loc[df_train['price']<=df_train['price' + str(q_value)], 'price_bin'] = i
else:
df_train.loc[(df_train['price']>df_train['price' + str(q_list[i-1])]) & (df_train['price']<=df_train['price' + str(q_value)]), 'price_bin'] = i
df_train.loc[df_train['price']>df_train['price' + str(q_value)], 'price_bin'] = i+1
df_train[df_train['price_bin']==3][['price', 'price0.1', 'price0.25', 'price0.5', 'price0.75', 'price0.9', 'price_bin']].drop_duplicates()
df_train = df_train.drop(['price' + str(x) for x in q_list])
df_train[['price_bin', 'target']].groupby('price_bin').agg(['count', 'mean'])
### ToDo
############### Solution ###############
df_train[['ts_weekday', 'target']].groupby('ts_weekday').agg(['count', 'mean'])
weekday = list(range(0,7))
weekday_bin = [0, 1, 1, 2, 2, 2, 0]
data = cudf.DataFrame({
'weekday': weekday,
'weekday_bin': weekday_bin,
})
df_train = df_train.merge(data, how='left', right_on='weekday', left_on='ts_weekday')
df_train[['weekday_bin', 'target']].groupby('weekday_bin').agg(['count', 'mean'])
# It is maybe counterintuitive:
# * the highest days are Sunday and Monday - a hypothesis could be that people
#   shop on Sunday evening and on the first day of the week
# * the lowest days are Thu-Sat - a hypothesis could be that Thu/Fri is the end of
#   the week, when people are finishing up their work and have no time for online
#   shopping. Saturday is maybe a day to go outside
############### Solution End ###########
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_train['cat_012'] = df_train['cat_0'].astype(str) + '_' + df_train['cat_1'].astype(str) + '_' + df_train['cat_2'].astype(str)
def group_binning(df, q_list = [0.1, 0.25, 0.5, 0.75, 0.9]):
df['price_bin'] = -1
for i, q_value in enumerate(q_list):
print(q_value)
q = df[['cat_012', 'price']].groupby(['cat_012']).quantile(q_value)
q = q.reset_index()
q.columns = ['cat_012', 'price' + str(q_value)]
df = df.merge(q, how='left', on='cat_012')
if i == 0:
df.loc[df['price']<=df['price' + str(q_value)], 'price_bin'] = i
else:
df.loc[(df['price']>df['price' + str(q_list[i-1])]) & (df['price']<=df['price' + str(q_value)]), 'price_bin'] = i
if i>=2:
df.drop(['price' + str(q_list[i-2])], axis=1, inplace=True)
df.loc[df['price']>df['price' + str(q_value)], 'price_bin'] = i+1
df.drop(['price' + str(q_list[i-1])], axis=1, inplace=True)
df.drop(['price' + str(q_list[i])], axis=1, inplace=True)
return(df)
df_train_pd = df_train.to_pandas()
%%time
df_train_pd = group_binning(df_train_pd)
%%time
df_train = group_binning(df_train)
df_train.head()
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/04_3_GaussRank.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import IPython
import pandas as pd
import cudf
import numpy as np
import cupy
import matplotlib.pyplot as plt
df_train = cudf.read_parquet('../data/train.parquet')
df_valid = cudf.read_parquet('../data/valid.parquet')
df_test = cudf.read_parquet('../data/test.parquet')
df_train['brand'] = df_train['brand'].fillna('UNKNOWN')
df_valid['brand'] = df_valid['brand'].fillna('UNKNOWN')
df_test['brand'] = df_test['brand'].fillna('UNKNOWN')
df_train['cat_0'] = df_train['cat_0'].fillna('UNKNOWN')
df_valid['cat_0'] = df_valid['cat_0'].fillna('UNKNOWN')
df_test['cat_0'] = df_test['cat_0'].fillna('UNKNOWN')
df_train['cat_1'] = df_train['cat_1'].fillna('UNKNOWN')
df_valid['cat_1'] = df_valid['cat_1'].fillna('UNKNOWN')
df_test['cat_1'] = df_test['cat_1'].fillna('UNKNOWN')
df_train['cat_2'] = df_train['cat_2'].fillna('UNKNOWN')
df_valid['cat_2'] = df_valid['cat_2'].fillna('UNKNOWN')
df_test['cat_2'] = df_test['cat_2'].fillna('UNKNOWN')
def target_encode(train, valid, col, target, kfold=5, smooth=20, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be encoded (in the example RESOURCE)
target: target column which will be used to calculate the statistic
"""
# We assume that the train dataset is shuffled
train['kfold'] = ((train.index) % kfold)
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
# We create the output column, we fill with 0
col_name = '_'.join(col)
train['TE_' + col_name] = 0.
for i in range(kfold):
###################################
# filter for out of fold
# calculate the mean/counts per group category
# calculate the global mean for the oof
# calculate the smoothed TE
# merge it to the original dataframe
###################################
df_tmp = train[train['kfold']!=i]
mn = df_tmp[target].mean()
df_tmp = df_tmp[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
df_tmp_m = train[col + ['kfold', 'org_sorting', 'TE_' + col_name]].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_' + col_name] = df_tmp_m.loc[df_tmp_m['kfold']==i, 'TE_tmp']
train['TE_' + col_name] = df_tmp_m['TE_' + col_name].fillna(mn).values
###################################
# calculate the mean/counts per group for the full training dataset
# calculate the global mean
# calculate the smoothed TE
# merge it to the original dataframe
# drop all temp columns
###################################
df_tmp = train[col + [target]].groupby(col).agg(['mean', 'count']).reset_index()
mn = train[target].mean()
df_tmp.columns = col + ['mean', 'count']
df_tmp['TE_tmp'] = ((df_tmp['mean']*df_tmp['count'])+(mn*smooth)) / (df_tmp['count']+smooth)
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp_m = valid[col + ['org_sorting']].merge(df_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['TE_' + col_name] = df_tmp_m['TE_tmp'].fillna(mn).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('kfold', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)
cats = [['cat_0'], ['cat_1'], ['cat_2'], ['cat_0', 'cat_1', 'cat_2'], ['ts_hour'], ['ts_weekday'], ['ts_weekday', 'ts_hour', 'cat_2', 'brand']]
for cat in cats:
df_train, df_valid = target_encode(df_train, df_valid, cat, 'target')
cats = ['brand', 'user_id', 'product_id', 'cat_0', 'cat_1', 'cat_2']
def count_encode(train, valid, col, gpu=True):
"""
train: train dataset
valid: validation dataset
col: column which will be count encoded (in the example RESOURCE)
"""
# We keep the original order as cudf merge will not preserve the original order
if gpu:
train['org_sorting'] = cupy.arange(len(train), dtype="int32")
else:
train['org_sorting'] = np.arange(len(train), dtype="int32")
train_tmp = train[col].value_counts().reset_index()
train_tmp.columns = [col, 'CE_' + col]
df_tmp = train[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
train['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
if gpu:
valid['org_sorting'] = cupy.arange(len(valid), dtype="int32")
else:
valid['org_sorting'] = np.arange(len(valid), dtype="int32")
df_tmp = valid[[col, 'org_sorting']].merge(train_tmp, how='left', left_on=col, right_on=col).sort_values('org_sorting')
valid['CE_' + col] = df_tmp['CE_' + col].fillna(0).values
valid = valid.drop('org_sorting', axis=1)
train = train.drop('org_sorting', axis=1)
return(train, valid)
%%time
for cat in cats:
df_train, df_valid = count_encode(df_train, df_valid, cat, gpu=True)
df_train.head()
import cupy as cp
from cupyx.scipy.special import erfinv
import cudf as gd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.special import erfinv as sp_erfinv
def gaussrank_cpu(data, epsilon = 1e-6):
r_cpu = data.argsort().argsort()
r_cpu = (r_cpu/r_cpu.max()-0.5)*2 # scale to (-1,1)
r_cpu = np.clip(r_cpu,-1+epsilon,1-epsilon)
r_cpu = sp_erfinv(r_cpu)
return(r_cpu)
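# Note (added for clarity): a quick sanity check of the GaussRank idea using the CPU
# version just defined - ranks are rescaled to (-1, 1) and passed through the inverse
# error function, so even a heavily skewed input comes out roughly Gaussian.
# The synthetic data below is illustrative only.
_demo = np.random.lognormal(size=1000)
_demo_gauss = gaussrank_cpu(_demo)
print(_demo_gauss.mean(), _demo_gauss.min(), _demo_gauss.max())   # roughly symmetric around 0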
def gaussrank_gpu(data, epsilon = 1e-6):
r_gpu = data.argsort().argsort()
r_gpu = (r_gpu/r_gpu.max()-0.5)*2 # scale to (-1,1)
r_gpu = cp.clip(r_gpu,-1+epsilon,1-epsilon)
r_gpu = erfinv(r_gpu)
return(r_gpu)
fig, axs = plt.subplots(1, 2, figsize=(16,3))
col = 'CE_product_id'
data_sample = df_train[col].sample(frac=0.01)
axs[0].hist(data_sample.to_pandas().values, bins=50)
axs[1].hist(cp.asnumpy(gaussrank_gpu(df_train[col].values)), bins=50)
axs[0].set_title('Histogram non-normalized')
axs[1].set_title('Histogram Gauss Rank')
############### Solution ###############
fig, axs = plt.subplots(3, 2, figsize=(16,9))
for i, col in enumerate(['price', 'TE_ts_weekday_ts_hour_cat_2_brand', 'CE_cat_2']):
data_sample = df_train[col].sample(frac=0.01)
axs[i, 0].hist(data_sample.to_pandas(), bins=50)
axs[i, 1].hist(cp.asnumpy(gaussrank_gpu(data_sample.values)), bins=50)
if i==0:
axs[i, 0].set_title('Histogram non-normalized')
axs[i, 1].set_title('Histogram Gauss Rank')
############### Solution End ###########
data_cpu = df_train['TE_ts_weekday_ts_hour_cat_2_brand'].to_pandas().values
data_gpu = df_train['TE_ts_weekday_ts_hour_cat_2_brand'].values
%%time
gaussrank_cpu(data_cpu)
%%time
gaussrank_gpu(data_gpu)
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/solutions/03_1_CombineCategories.ipynb | # The MIT License (MIT)
# Copyright (c) 2020, NVIDIA CORPORATION.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import IPython
import cudf
import pandas as pd
import numpy as np
f1 = [0]*45 + [1]*45 + [2]*10 + [0]*5 + [1]*5 + [2]*90 + [0]*5 + [1]*5 + [2]*90 + [0]*45 + [1]*45 + [2]*10
f2 = [0]*45 + [0]*45 + [0]*10 + [1]*5 + [1]*5 + [1]*90 + [0]*5 + [0]*5 + [0]*90 + [1]*45 + [1]*45 + [1]*10
t = [1]*45 + [1]*45 + [1]*10 + [1]*5 + [1]*5 + [1]*90 + [0]*5 + [0]*5 + [0]*90 + [0]*45 + [0]*45 + [0]*10
data = cudf.DataFrame({
'f1': f1,
'f2': f2,
})
for i in range(3,5):
data['f' + str(i)] = np.random.choice(list(range(3)), data.shape[0])
data['target'] = t
data.head()
data.groupby('f1').target.agg(['mean', 'count'])
data.groupby('f2').target.agg(['mean', 'count'])
data.groupby(['f1', 'f2']).target.agg(['mean', 'count'])
df = data.to_pandas()
import pydotplus
import sklearn.tree as tree
from IPython.display import Image
def get_hotn_features(df):
out = []
for col in df.columns:
if col != 'target':
out.append(pd.get_dummies(df[col], prefix=col))
return(pd.concat(out, axis=1))
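# Note (added for clarity): a quick look at what get_hotn_features returns for the toy
# frame df defined above - one indicator column per (feature, level) pair (f1_0, f1_1,
# f1_2, f2_0, ...), which is exactly the matrix the decision tree below is fit on.
get_hotn_features(df).head()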
def viz_tree(df, lf):
dt_feature_names = list(get_hotn_features(df).columns)
dt_target_names = 'target'
tree.export_graphviz(lf, out_file='tree.dot',
feature_names=dt_feature_names, class_names=dt_target_names,
filled=True)
graph = pydotplus.graph_from_dot_file('tree.dot')
return(graph.create_png())
lf = tree.DecisionTreeClassifier(max_depth=2)
lf.fit(get_hotn_features(df), df[['target']])
Image(viz_tree(df, lf))
df['f1_f2'] = df['f1'].astype(str) + df['f2'].astype(str)
lf.fit(get_hotn_features(df), df[['target']])
Image(viz_tree(df, lf))
df.groupby([x for x in df.columns if 'target' not in x and 'f1_f2' not in x]).target.agg(['mean', 'count']).head(10)
df.astype(str).describe()
import cudf
df_train = cudf.read_parquet('../data/train.parquet')
df_train.head()
###ToDo
def explore_cat(df, cats):
df_agg = df_train[cats + ['target']].groupby(cats).agg(['mean', 'count']).reset_index()
df_agg.columns = cats + ['mean', 'count']
print(df_agg.sort_values('count', ascending=False).head(20))
cats = ['product_id', 'user_id']
explore_cat(df_train, cats)
############### Solution ###############
cats = ['ts_weekday', 'ts_hour']
explore_cat(df_train, cats)
cats = ['cat_2', 'brand']
explore_cat(df_train, cats)
############### Solution End ###########
big_df = df_train.to_pandas()
big_data = df_train
print('Pandas Shape:' + str(big_df.shape))
print('cudf Shape:' + str(big_data.shape))
%%time
big_df.groupby(['cat_0', 'cat_1', 'cat_2', 'cat_3', 'brand']).target.agg(['mean', 'count'])
print('')
%%time
big_data.groupby(['cat_0', 'cat_1', 'cat_2', 'cat_3', 'brand']).target.agg(['mean', 'count'])
print('')
app = IPython.Application.instance()
app.kernel.do_shutdown(False) | 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/images/dask-dataframe.svg | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="812.37378"
height="1011.8721"
id="svg2"
version="1.1"
inkscape:version="0.48.4 r9939"
sodipodi:docname="New document 1">
<defs
id="defs4" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="0.5"
inkscape:cx="68.623957"
inkscape:cy="300.16925"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="true"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:window-width="1600"
inkscape:window-height="876"
inkscape:window-x="0"
inkscape:window-y="24"
inkscape:window-maximized="1">
<inkscape:grid
type="xygrid"
id="grid2985"
empspacing="5"
visible="true"
enabled="true"
snapvisiblegridlinesonly="true"
spacingx="2px"
spacingy="2px"
originx="45.029487px"
originy="-244.02136px" />
</sodipodi:namedview>
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(45.029487,203.53131)">
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:5.58885813;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect2987"
width="251.49863"
height="195.61005"
x="246.14598"
y="-200.72348" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:5.58885813;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect3796"
width="251.49863"
height="139.72145"
x="246.14598"
y="22.830854" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:5.58885813;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect3798"
width="251.49863"
height="307.38721"
x="246.14598"
y="190.4966" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:5.58885813;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect3800"
width="251.49863"
height="167.66576"
x="246.14598"
y="525.82812" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:5.58885813;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect3802"
width="251.49863"
height="83.832878"
x="246.14598"
y="721.43817" />
<text
xml:space="preserve"
style="font-size:33.53314972px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="78.480225"
y="-116.8906"
id="text3804"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3806"
x="78.480225"
y="-116.8906"
style="font-size:39.12200928px">January, 2016</tspan></text>
<text
xml:space="preserve"
style="font-size:33.53314972px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="78.480225"
y="106.66373"
id="text3804-1"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3806-5"
x="78.480225"
y="106.66373"
style="font-size:39.12200928px">February, 2016</tspan></text>
<text
xml:space="preserve"
style="font-size:33.53314972px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="78.480225"
y="358.16235"
id="text3804-3"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3806-9"
x="78.480225"
y="358.16235"
style="font-size:39.12200928px">March, 2016</tspan></text>
<text
xml:space="preserve"
style="font-size:33.53314972px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="78.480225"
y="609.66095"
id="text3804-5"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3806-52"
x="78.480225"
y="609.66095"
style="font-size:39.12200928px">April, 2016</tspan></text>
<text
xml:space="preserve"
style="font-size:33.53314972px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="78.480225"
y="777.32672"
id="text3804-51"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3806-7"
x="78.480225"
y="777.32672"
style="font-size:39.12200928px">May, 2016</tspan></text>
<text
xml:space="preserve"
style="font-size:33.53314972px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="671.9798"
y="78.719437"
id="text3886"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3888"
x="676.48798"
y="78.719437"
style="font-size:39.12200928px">Pandas </tspan><tspan
sodipodi:role="line"
x="671.9798"
y="127.62195"
style="font-size:39.12200928px"
id="tspan3894">DataFrame</tspan></text>
<text
xml:space="preserve"
style="font-size:122.2452774px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="645.08667"
y="114.33765"
id="text3890"
sodipodi:linespacing="125%"
transform="scale(0.849412,1.177285)"><tspan
sodipodi:role="line"
id="tspan3892"
x="645.08667"
y="114.33765">}</tspan></text>
<text
xml:space="preserve"
style="font-size:33.53314972px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="661.44617"
y="296.68491"
id="text3896"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3898"
x="665.95435"
y="296.68491"
style="font-size:39.12200928px">Dask </tspan><tspan
sodipodi:role="line"
x="661.44617"
y="345.5874"
id="tspan3904"
style="font-size:39.12200928px">DataFrame</tspan></text>
<text
xml:space="preserve"
style="font-size:355.3336792px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="1527.4156"
y="209.45833"
id="text3900"
sodipodi:linespacing="125%"
transform="scale(0.34044434,2.9373377)"><tspan
sodipodi:role="line"
id="tspan3902"
x="1527.4156"
y="209.45833">}</tspan></text>
</g>
</svg>
| 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/images/dask-array-black-text.svg | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="526.211"
height="252.00002"
id="svg3277"
version="1.1"
inkscape:version="0.48.4 r9939"
sodipodi:docname="dask-array-black-text.svg">
<defs
id="defs3279" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.4142136"
inkscape:cx="313.40295"
inkscape:cy="40.810933"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="true"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:window-width="1600"
inkscape:window-height="876"
inkscape:window-x="0"
inkscape:window-y="24"
inkscape:window-maximized="1">
<inkscape:grid
type="xygrid"
id="grid3292"
empspacing="5"
visible="true"
enabled="true"
snapvisiblegridlinesonly="true"
originx="-398.9989px"
originy="-528.98717px" />
</sodipodi:namedview>
<metadata
id="metadata3282">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-398.9989,-271.375)">
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4572"
width="60"
height="120"
x="-522.36218"
y="400"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4594"
width="60"
height="120"
x="-452.36218"
y="400"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4598"
width="80"
height="120"
x="-382.36218"
y="400"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4600"
width="20"
height="120"
x="-292.36218"
y="400"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4602"
width="60"
height="30"
x="-522.36218"
y="530"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4604"
width="60"
height="30"
x="-452.36218"
y="530"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4606"
width="80"
height="30"
x="-382.36218"
y="530"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4608"
width="20"
height="30"
x="-292.36218"
y="530"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4610"
width="60"
height="60"
x="-522.36218"
y="570"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4612"
width="60"
height="60"
x="-452.36218"
y="570"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4614"
width="80"
height="60"
x="-382.36218"
y="570"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4616"
width="20"
height="60"
x="-292.36218"
y="570"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4618"
width="60"
height="110"
x="-522.36218"
y="640"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4620"
width="60"
height="110"
x="-452.36218"
y="640"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4622"
width="80"
height="110"
x="-382.36218"
y="640"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4624"
width="20"
height="110"
x="-292.36218"
y="640"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4626"
width="60"
height="50"
x="-522.36218"
y="760"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4628"
width="60"
height="50"
x="-452.36218"
y="760"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4630"
width="80"
height="50"
x="-382.36218"
y="760"
transform="matrix(0,-1,1,0,0,0)" />
<rect
style="opacity:0.6;fill:#0000b0;fill-opacity:0.50196078;stroke:#000000;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:0.44292238;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4632"
width="20"
height="50"
x="-292.36218"
y="760"
transform="matrix(0,-1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="848.61719"
y="337.36218"
id="text4653"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan4655"
x="850.23047"
y="337.36218"
style="font-size:14px;fill:#000000;fill-opacity:1">NumPy </tspan><tspan
sodipodi:role="line"
x="848.61719"
y="354.86218"
id="tspan4657"
style="font-size:14px;fill:#000000;fill-opacity:1">Array</tspan></text>
<text
xml:space="preserve"
style="font-size:44.57763289999999756px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="1510.3115"
y="198.09331"
id="text4659"
sodipodi:linespacing="125%"
transform="scale(0.53962379,1.8531429)"><tspan
sodipodi:role="line"
id="tspan4661"
x="1510.3115"
y="198.09331">}</tspan></text>
<text
xml:space="preserve"
style="font-size:107.32640838999999744px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="2106.272"
y="197.52885"
id="text4671"
sodipodi:linespacing="125%"
transform="scale(0.41670775,2.3997634)"><tspan
sodipodi:role="line"
id="tspan4673"
x="2106.272"
y="197.52885">}</tspan></text>
<text
xml:space="preserve"
style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Ubuntu;-inkscape-font-specification:Ubuntu"
x="908.38672"
y="387.36218"
id="text4653-3"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
x="910"
y="387.36218"
id="tspan4699"
style="font-size:14px;fill:#000000;fill-opacity:1">Dask </tspan><tspan
sodipodi:role="line"
x="908.38672"
y="404.86218"
id="tspan4657-0"
style="font-size:14px;fill:#000000;fill-opacity:1">Array</tspan></text>
</g>
</svg>
| 0 |
rapidsai_public_repos/deeplearning/RecSys2020Tutorial | rapidsai_public_repos/deeplearning/RecSys2020Tutorial/data/README.md | # RecSys2020 Tutorial: Feature Engineering for Recommender Systems
by Chris Deotte (Nvidia), Benedikt Schifferer (Nvidia) and Even Oldridge (Nvidia)
### Content
The selection of features and proper preparation of data for deep learning or machine learning models plays a significant role in the performance of recommender systems. To address this, we propose a tutorial highlighting best practices and optimization techniques for feature engineering and preprocessing of recommender system datasets. The tutorial will explore feature engineering using pandas and Dask, and will also cover acceleration on the GPU using open source libraries like RAPIDS and NVTabular. The proposed length is 180 minutes. We’ve designed the tutorial as a combination of a lecture covering the mathematical and theoretical background and an interactive session based on Jupyter notebooks. Participants will practice the discussed features by writing their own implementations in Python. NVIDIA will host the tutorial on its infrastructure, providing the dataset, Jupyter notebooks and GPUs. Participants will be able to attend the tutorial from their web browsers, avoiding any complicated setup.

The target audience is beginner to intermediate users, who should have prior knowledge of Python programming with libraries such as pandas and NumPy. In addition, they should have a basic understanding of recommender systems, decision trees and feed-forward neural networks.
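Below is a minimal, illustrative sketch (not part of the tutorial materials) of the kind of GPU-accelerated feature engineering the tutorial covers: a simple count encoding computed with cuDF. The column names and values are made up for the example.

```python
import cudf

# toy interaction data (hypothetical columns)
df = cudf.DataFrame({"user_id": [1, 1, 2, 3, 3, 3],
                     "item_id": [10, 11, 10, 12, 13, 10]})

# count encoding: how often each user appears in the data
counts = df.groupby("user_id").agg({"item_id": "count"}).reset_index()
counts.columns = ["user_id", "user_count"]
df = df.merge(counts, on="user_id", how="left")
print(df)
```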
### Requirements
* RAPIDS cuDF 0.15
* NVTabular 0.2
* PyTorch
### Structure
* Notebooks contain theory and exercises
* /solutions/ contains solutions for the exercises
* /data/ is the expected location of the parquet files
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/champs-scalar-coupling/bootstrap_model.py | from mpnn_model.common import *
from torch_scatter import *
from torch_geometric.utils import scatter_
import torch
import torch.nn as nn
import torch.nn.functional as F
import numbers
# Fast ai
from fastai.tabular import *
from fastai.callbacks import SaveModelCallback
#__all__ = ['LinearBn', 'MlpBn', 'CustomTabularModel', 'get_node_encoder', 'get_edge_encoder',
# 'GraphConv', 'Set2Set', 'get_regression_module', 'Net' ]
#############################################################################################################
# #
# Base-line models #
# #
#############################################################################################################
class LinearBn(nn.Module):
'''
Batch norm dense layer
Arguments:
- in_channel: int, Input dimension
- out_channel: int, Output dimension
- act: str, Activation function to apply to the output of batch normalizaiton.
'''
def __init__(self, in_channel, out_channel, act=None):
super(LinearBn, self).__init__()
self.linear = nn.Linear(in_channel, out_channel, bias=False)
self.bn = nn.BatchNorm1d(out_channel, eps=1e-05, momentum=0.1)
if act is not None :
self.act = F.__dict__[act]
else:
self.act = act
def forward(self, x):
x = self.linear(x)
if self.bn is not None:
x = self.bn(x)
if self.act is not None:
x = self.act(x)
return x
class MlpBn(nn.Module):
''' Fully connected feed forward neural network: stacked batch norm layers with dropout
Args:
input_dim (int32): the dimension of input
        dimensions (list of int32): the dimensions of the hidden layers.
        activation (string): Activation function to apply to the output of each layer.
        dropout (float): the dropout probability to apply to each layer.
'''
def __init__(self,
input_dim,
dimensions,
                 activation='relu',
dropout=0.):
super(MlpBn, self).__init__()
self.input_dim = input_dim
self.dimensions = dimensions
self.activation = activation
self.dropout = dropout
# Modules
self.linears = nn.ModuleList([LinearBn(input_dim, dimensions[0], act=activation)])
for din, dout in zip(dimensions[:-1], dimensions[1:]):
self.linears.append(LinearBn(din, dout, act=self.activation))
def forward(self, x):
for i,lin in enumerate(self.linears):
x = lin(x)
if self.dropout > 0:
x = F.dropout(x, self.dropout, training=self.training)
return x
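# Illustrative usage sketch (hypothetical sizes, not taken from the training
# configuration): MlpBn maps a [batch, input_dim] tensor through a stack of
# batch-normalized linear layers.
#
#   mlp = MlpBn(input_dim=16, dimensions=[128, 64], activation='relu', dropout=0.1)
#   out = mlp(torch.randn(32, 16))   # -> shape [32, 64]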
#############################################################################################################
# #
# Node and edge encoders #
# #
#############################################################################################################
class CustomTabularModel(nn.Module):
"Basic model for tabular data."
def __init__(self, emb_szs:ListSizes, n_cont:int, out_sz:int, layers:Collection[int], ps:Collection[float]=None,
emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, bn_final:bool=False):
super().__init__()
ps = ifnone(ps, [0]*len(layers))
ps = listify(ps, layers)
#self.bsn = BatchSwapNoise(0.15)
self.embeds = nn.ModuleList([embedding(ni, nf) for ni,nf in emb_szs])
self.emb_drop = nn.Dropout(emb_drop)
self.bn_cont = nn.BatchNorm1d(n_cont)
n_emb = sum(e.embedding_dim for e in self.embeds)
self.n_emb,self.n_cont,self.y_range = n_emb,n_cont,y_range
sizes = self.get_sizes(layers, out_sz)
actns = [nn.ReLU(inplace=True) for _ in range(len(sizes)-2)] + [None]
layers = []
for i,(n_in,n_out,dp,act) in enumerate(zip(sizes[:-1],sizes[1:],[0.]+ps,actns)):
layers += bn_drop_lin(n_in, n_out, bn=use_bn and i!=0, p=dp, actn=act)
if bn_final: layers.append(nn.BatchNorm1d(sizes[-1]))
layers = layers[:-2]
self.layers = nn.Sequential(*layers)
def get_sizes(self, layers, out_sz):
return [self.n_emb + self.n_cont] + layers + [out_sz]
def forward(self, x_cat:Tensor, x_cont:Tensor) -> Tensor:
#self.bsn(x_cat)
if self.n_emb != 0:
x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
x = torch.cat(x, 1)
x = self.emb_drop(x)
if self.n_cont != 0:
x_cont = self.bn_cont(x_cont)
x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont
x = self.layers(x)
return x
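# Illustrative usage sketch (embedding sizes follow the commented example in
# get_node_encoder below; batch size is hypothetical): categorical atom
# features are embedded and concatenated with the continuous columns.
#
#   enc = CustomTabularModel(emb_szs=[(6, 4), (3, 3), (3, 3), (3, 3), (5, 4)],
#                            n_cont=1, out_sz=2, layers=[128, 64], ps=[0.1])
#   x_cat = torch.randint(0, 3, (32, 5))   # [batch, n_categorical], long
#   x_cont = torch.randn(32, 1)            # [batch, n_cont], float
#   node_repr = enc(x_cat, x_cont)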
def get_node_encoder(encoding, emb_sz, n_cont, node_dim, layers, activation, dropout=0.):
'''
- Get the MLP network to process nodes features and build node representation
'''
if encoding == 'one_hot':
return MlpBn(node_dim, dimensions=layers, activation=activation, dropout=dropout)
elif encoding== 'label':
# embed symbol, acceptor, donor, aromatic, hybridization
# emb_sz = [(6,4), (3,3), (3,3), (3,3), (5,4)]
return CustomTabularModel(emb_szs = emb_sz, out_sz=2, n_cont=n_cont, layers=layers, ps=[dropout], emb_drop=0.)
def get_edge_encoder(encoding, emb_sz, n_cont, node_dim, edge_dim, layers, activation, dropout=0.):
'''
Get the MLP network to process edges features and build matrix representation
Arguments:
- encoding: str, the encoding of categorical variables : "label" vs "one_hot"
- emb_sz: list of tuples, the embedding size of each categorical variable
- n_cont: int, the number of continious variables
- node_dim: int, the dimension of node's representation
- edge_dim: int, the input dimension of edge's features
- layers: list of int, the dimensions of hidden layers
- activation: str, the activation to apply for layers.
- dropout: [float], dropout of each hidden layer.
'''
if encoding == 'one_hot':
return MlpBn(edge_dim, dimensions=layers+[node_dim*node_dim], activation=activation, dropout=dropout)
elif encoding== 'label':
# emb_sz = [(5,8)]
return CustomTabularModel(emb_szs = emb_sz, n_cont=n_cont , out_sz=2, layers=layers+[node_dim*node_dim], ps=[dropout], emb_drop=0.)
#############################################################################################################
# #
# MPNN- PHASE1 : Message Passing #
# #
#############################################################################################################
def message_pass(node_states, edge_index, a_in):
"""Computes a_t from h_{t-1}, see bottom of page 3 in the paper.
a_t = sum_w A(e_vw) . h^t
Args:
node_states: [batch_size*num_nodes, node_dim] tensor (h_{t-1})
a_in (torch.float32): [batch_size*num_nodes, node_dim, node_dim]: Encoded edge matrix
edge_index [batch_size*num_edges, 2]: the indices of edges
Returns:
      messages (torch.float32): [batch_size*num_nodes, node_dim]. For each edge
      in the graph, a message is sent to the target node along the incoming edge,
      and messages are mean-aggregated per node.
"""
num_node, node_dim = node_states.shape
edge_index = edge_index.t().contiguous()
x_i = torch.index_select(node_states, 0, edge_index[0])
message = torch.matmul( x_i.view(-1,1,node_dim), a_in).view(-1, node_dim)
message = scatter_('mean', message, edge_index[1], dim_size=num_node)
return message
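# Shape sketch (illustrative values only): with num_node=3, node_dim=4 and two
# directed edges 0->1 and 1->2,
#   node_states: [3, 4]
#   edge_index:  [2, 2]   (one row per edge: [source, target])
#   a_in:        [2, 4, 4] (one encoded matrix per edge)
# the returned tensor has shape [3, 4]: each target node receives the mean of
# (h_source @ A_edge) over its incoming edges.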
class MessagePassing(nn.Module):
'''
A feed forward neural network is applied to each edge in the adjacency matrix,
which is assumed to be vector valued. It maps the edge vector to a
node_dim x node_dim matrix, denoted NN(e). The message from node v -> w is
then NN(e) h_v. This is a generalization of the message function in the
GG-NN paper, which embeds the discrete edge label as a matrix.
'''
def __init__(self, ConfigParams):
'''
'''
super(MessagePassing, self).__init__()
self.encoding = ConfigParams['model']['mpnn']['node_encoder']['encoding']
self.edge_encoder = get_edge_encoder(**ConfigParams['model']['mpnn']['edge_encoder'])
self.node_dim = ConfigParams['model']['mpnn']['edge_encoder']['node_dim']
self.bias = nn.Parameter(torch.Tensor(self.node_dim)).cuda()
self.bias.data.uniform_(-1.0 / math.sqrt(self.node_dim), 1.0 / math.sqrt(self.node_dim))
self._a_in = []
def _pre_encode_edges(self, edge):
'''
Args:
edge: [batch_size*num_edges, edge_dim] edge features
Return:
            A neural representation of the edge features where each vector is represented as
matrix of shape node_dim x node_dim
'''
if self.encoding == 'label':
edge_cat = edge[:, 0].long().view(-1,1)
edge_cont = edge[:, 1:].float()
edge = self.edge_encoder(edge_cat, edge_cont).view(-1,self.node_dim,self.node_dim)
elif self.encoding == 'one_hot':
edge = self.edge_encoder(edge).view(-1, self.node_dim, self.node_dim)
self._a_in = edge
def forward(self, node_states, edge_index, edge, reuse_graph_tensors=True):
'''
Args:
node_states: [batch_size*num_nodes, node_dim] tensor (h_{t-1})
            edge: [batch_size*num_edges, edge_dim] edge features (torch.float32)
reuse_graph_tensors: Boolean to indicate whether or not the self._a_in
should be reused or not. Should be set to False on first call, and True
on subsequent calls.
Returns:
message_t: [batch_size * num_nodes, node_dim] which is the node representations
            after a single propagation step
'''
if not reuse_graph_tensors:
self._pre_encode_edges(edge)
new_state = message_pass(node_states, edge_index, self._a_in)
return F.relu(new_state + self.bias)
#############################################################################################################
# #
#                                     MPNN- PHASE2 : Update node states                                     #
# #
#############################################################################################################
class GRUUpdate(nn.Module):
def __init__(self, ConfigParams):
super(GRUUpdate, self).__init__()
self.node_dim = ConfigParams['model']['mpnn']['edge_encoder']['node_dim']
self.gru = nn.GRU(self.node_dim, self.node_dim, batch_first=False, bidirectional=False)
def forward(self, messages, node_states):
"""Build the fprop graph.
Args:
node_states: [batch_size*num_nodes, node_dim] tensor (h_{t-1})
messages: [batch_size*num_nodes, node_dim] (a_t from the GGNN paper)
Returns:
updated_states: [batch_size*num_nodes, node_dim]
"""
num_node, node_dim = node_states.shape
update, _ = self.gru(messages.view(1,-1,self.node_dim),
node_states.view(1,num_node,-1))
return update.view(-1,node_dim)
#############################################################################################################
# #
#                                     MPNN- PHASE3 : Readout function                                       #
# #
#############################################################################################################
class Set2Set(torch.nn.Module):
def softmax(self, x, index, num=None):
x = x - scatter_max(x, index, dim=0, dim_size=num)[0][index]
x = x.exp()
x = x / (scatter_add(x, index, dim=0, dim_size=num)[index] + 1e-16)
return x
def __init__(self, in_channel, processing_step=1, num_layer = 1, batch_size=32):
super(Set2Set, self).__init__()
out_channel = 2 * in_channel
self.processing_step = processing_step
self.batch_size = batch_size
self.in_channel = in_channel
self.out_channel = out_channel
self.num_layer = num_layer
self.lstm = torch.nn.LSTM(out_channel, in_channel, num_layer)
self.lstm.reset_parameters()
def forward(self, x, batch_index):
h = (x.new_zeros((self.num_layer, self.batch_size, self.in_channel)),
x.new_zeros((self.num_layer, self.batch_size, self.in_channel)))
# zeros of shape: bs x 2*node_dim : init q_star
q_star = x.new_zeros(self.batch_size, self.out_channel)
# n readout steps
for i in range(self.processing_step):
# read from memory
q, h = self.lstm(q_star.unsqueeze(0), h)
q = q.view(self.batch_size, -1)
#energies : dot product between input_set and q
e = (x * q[batch_index]).sum(dim=-1, keepdim=True) #shape = num_node x 1
# Compute attention
a = self.softmax(e, batch_index, num=self.batch_size) #shape = num_node x 1
#compute readout
r = scatter_add(a * x, batch_index, dim=0, dim_size=self.batch_size) #apply attention #shape = batch_size x ...
#update q_star
q_star = torch.cat([q, r], dim=-1)
# print(q_star.shape)
return q_star
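# Illustrative usage sketch (hypothetical sizes): Set2Set pools per-node vectors
# into a single [batch_size, 2*in_channel] graph embedding.
#
#   s2s = Set2Set(in_channel=128, processing_step=6, batch_size=32)
#   # x: [total_num_nodes, 128]; batch_index: [total_num_nodes] with values in [0, 32)
#   graph_embedding = s2s(x, batch_index)   # -> [32, 256]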
#############################################################################################################
# #
# Nodes sequence : Attention-Bidirectional #
# #
#############################################################################################################
class BI_RNN_Nodes(torch.nn.Module):
def attention_neuralnet(self, rnn_out, state):
"""
#### credit to : https://github.com/wabyking/TextClassificationBenchmark
"""
merged_state = torch.cat([s for s in state],1) # merge the hidden states of the two directions
merged_state = merged_state.squeeze(0).unsqueeze(2)
# (batch, seq_len, cell_size) * (batch, cell_size, 1) = (batch, seq_len, 1)
weights = torch.bmm(rnn_out, merged_state)
        weights = torch.nn.functional.softmax(weights.squeeze(2), dim=1).unsqueeze(2)
# (batch, cell_size, seq_len) * (batch, seq_len, 1) = (batch, cell_size, 1)
return torch.bmm(torch.transpose(rnn_out, 1, 2), weights).squeeze(2), weights
def __init__(self,
node_dim,
hidden_size,
num_layers,
dropout,
batch_first,
bidirectional,
rnn_model='LSTM',
attention=True):
super(BI_RNN_Nodes, self).__init__()
self.type_encoder = nn.Embedding(16, 32, padding_idx=0)
self.atomic_encoder = nn.Embedding(16, 32, padding_idx=0)
self.attention = attention
if rnn_model == 'LSTM':
self.rnn = nn.LSTM(input_size= node_dim + 64, hidden_size=hidden_size,
num_layers=num_layers, dropout=dropout,
batch_first=batch_first, bidirectional=bidirectional)
else:
raise LookupError('only support LSTM ')
def forward(self, x_nodes, x_coupling_type, x_atomic):
'''
x_nodes [batch_size x path_length x node_dim] : sequence of nodes embeddings of the coupling's shortest path
x_coupling_type [batch_size x 4 x 1]: sequence of in-coming bond type
'''
x_type = self.type_encoder(x_coupling_type+1).squeeze()
x_atomic = self.atomic_encoder(x_atomic+1).squeeze()
x = torch.cat([x_nodes, x_type, x_atomic], dim=2)
rnn_out, (final_hidden_state, final_cell_state) = self.rnn(x, None)
if self.attention:
last_tensor_item, weights = self.attention_neuralnet(rnn_out, final_hidden_state)
else:
# use mean instead of weighted attention
            # average over the full sequence ('row_indices' was undefined in the original)
            last_tensor_item = torch.mean(rnn_out, dim=1)
return last_tensor_item
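# Illustrative input shapes (hypothetical batch size): the module embeds the
# 4-atom coupling path as a sequence and returns one attention-pooled vector
# per coupling example.
#
#   seq_model = BI_RNN_Nodes(node_dim=128, hidden_size=256, num_layers=1, dropout=0.,
#                            batch_first=True, bidirectional=True)
#   # x_nodes: [bs, 4, 128], x_coupling_type: [bs, 4, 1] (long), x_atomic: [bs, 4, 1] (long)
#   pooled = seq_model(x_nodes, x_coupling_type, x_atomic)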
#############################################################################################################
# #
# Output models #
# #
#############################################################################################################
def get_regression_module(num_output=1,
node_dim=128,
shared_layers=[1024, 512],
activation='relu',
dropout= 0.,
branch_layers=[],
num_target=8,
predict_type =False):
'''
Regression module
Outputs: 4 branches
        dense_layer: shared branch that learns a dense representation from the concatenation of the graph representation
                      and the representations of the nodes along the coupling path.
classify: if predict_type==True, Classification branch that computes the logits of the 8 classes of coupling type
predict: if num_output==1, Regression branch that computes scalar coupling constant vector: 8 values (per type)
        predicition_layers: if num_output==5, 8 regression branches (one for each coupling type) that compute
the scalar coupling constant and the contribution components.
'''
predicition_layers = []
classify =[]
predict = []
dense_layer = LinearBn(node_dim*6, shared_layers[0], act=activation)
if num_output==1:
predict = nn.Sequential(
MlpBn(shared_layers[0], dimensions=shared_layers[1:], activation=activation, dropout=dropout),
nn.Linear(shared_layers[-1], num_target)
)
elif num_output == 5:
model = nn.Sequential(
MlpBn(shared_layers[0],
dimensions=branch_layers,
activation=activation,
dropout=dropout),
nn.Linear(branch_layers[-1], num_output))
predicition_layers = nn.ModuleList([model for i in range(num_target)])
if predict_type:
classify = nn.Sequential(
LinearBn( shared_layers[0], 512),
nn.ReLU(inplace=True),
nn.Linear(512, num_target),)
return dense_layer, classify, predict, predicition_layers
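# Illustrative call (hypothetical hyper-parameters): returns the shared dense
# layer plus the optional classification / regression heads described above.
#
#   dense, classify, predict, per_type_heads = get_regression_module(
#       num_output=1, node_dim=128, shared_layers=[1024, 512],
#       activation='relu', dropout=0.1, num_target=8, predict_type=True)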
#############################################################################################################
# #
# END-to-END model #
# #
#############################################################################################################
class Net(torch.nn.Module):
def __init__(self,
ConfigParams,
num_target=8,
):
"""
Arguments:
            mpnn: Dictionary with all the needed arguments for the message passing, GRU update and Set2Set modules.
regression: Dictionary with all the needed arguments for regression output module.
batch_size:
num_target:
predict_type:
"""
super(Net, self).__init__()
self.encoding = ConfigParams['model']['mpnn']['node_encoder']['encoding']
self.num_output = ConfigParams['model']['regression']['num_output']
self.predict_type = ConfigParams['model']['regression']['predict_type']
self.y_range = ConfigParams['model']['Classif']['y_range']
self.node_dim = ConfigParams['model']['mpnn']['edge_encoder']['node_dim']
###################------------- MPNN representation ---------------####################
self.num_propagate = ConfigParams['model']['mpnn']['T_steps']
# Process the nodes features
self.preprocess = get_node_encoder(**ConfigParams['model']['mpnn']['node_encoder'])
# Message
self.message_function = MessagePassing(ConfigParams)
#Update
self.update_function = GRUUpdate(ConfigParams)
#readout
self.readout = Set2Set(**ConfigParams['model']['mpnn']['Set2Set'])
###################---------------- Build predictions ------------------######################
self.rnn_attention = BI_RNN_Nodes(**ConfigParams['model']['node_seq'])
self.dense_layer, self.classify, self.predict, self.predicition_layers = get_regression_module(**ConfigParams['model']['regression'])
self.default_node_vector = torch.distributions.uniform.Uniform(-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)).sample_n(self.node_dim).cuda()
def forward(self,
node,
edge,
edge_index,
node_index,
coupling_index,
bond_type,
x_atomic,):
num_node, node_dim = node.shape
num_edge, edge_dim = edge.shape
#--- Build the graph representation using MPNN
# Process nodes representation
if self.encoding == 'one_hot':
node = self.preprocess(node)
elif self.encoding == 'label':
node_cat, node_cont = node[:,:6].long(), node[:,-1].view(-1,1).float()
node = self.preprocess(node_cat, node_cont)
# T-steps of message updates
for i in range(self.num_propagate):
# node <- h_v^t
messages = self.message_function(node, edge_index, edge, reuse_graph_tensors=(i != 0)) # m_v^t+1 = sum_w(E_vw * h_vw^t)
node = self.update_function(messages, node) # h_v^t+1 = GRU(m_v^t+1, h_v^t)
# K-steps of readout function
pool = self.readout(node, node_index)
#--- Get indices of the coupling atoms
num_coupling = len(coupling_index)
coupling_atom0_index, coupling_atom1_index, coupling_atom2_index, coupling_atom3_index, coupling_type_index, coupling_batch_index = \
torch.split(coupling_index,1,dim=1)
        #Concatenate the graph representation vector 'pool',
pool = torch.index_select( pool, dim=0, index=coupling_batch_index.view(-1))
#pad random unseen node vector to node matrix
node = torch.cat([self.default_node_vector.view(1, -1), node], dim=0)
# build node's embedding sequence
node0 = torch.index_select( node, dim=0, index=coupling_atom0_index.view(-1)+1).unsqueeze(1)
node1 = torch.index_select( node, dim=0, index=coupling_atom1_index.view(-1)+1).unsqueeze(1)
node2 = torch.index_select( node, dim=0, index=coupling_atom2_index.view(-1)+1).unsqueeze(1)
node3 = torch.index_select( node, dim=0, index=coupling_atom3_index.view(-1)+1).unsqueeze(1)
node_seq = torch.cat([node0, node1, node2, node3], dim=1) # bs x 4 x node_dim
#attention_node_seq = self.rnn_attention(node_seq, bond_type.view(-1, 4, 1), )
attention_node_seq = self.rnn_attention(node_seq, bond_type.view(-1, 4, 1), x_atomic.view(-1, 4, 1))
dense_representation = self.dense_layer(torch.cat([pool, attention_node_seq],-1))
self.pool = pool
#--- Get the regression predictions w.r.t the coupling type
if self.num_output ==1:
predict = self.predict(dense_representation)
elif self.num_output==5:
# get 5 dim prediction vector for each type : num_targets = 8
preds = [self.predicition_layers[i](dense_representation).view(-1, 1, 5) for i in range(8)]
predict = torch.cat(preds, dim=1)
#---Get the outputs : coupling_preds, contribution_preds, type_classes :
#w.r.t the two flags : num_output (1: scalar vs 5: scalar+contribution) & predict_type: False (use the actual type) Vs True (predict the type)
predict_type = []
contribution_preds = []
if self.predict_type:
predict_type = self.classify(dense_representation)
if self.num_output == 5:
contribution_preds = predict[:,:, 1:]
coupling_preds = predict[:,:, 0]
else:
coupling_preds = (self.y_range[1]-self.y_range[0]) * torch.sigmoid(predict) + self.y_range[0]
elif self.num_output==1:
coupling_preds =torch.gather(predict, 1, coupling_type_index).view(-1)
coupling_preds = (self.y_range[1]-self.y_range[0]) * torch.sigmoid(coupling_preds) + self.y_range[0]
elif self.num_output==5:
predict = torch.gather(predict, 1, coupling_type_index.view(-1, 1, 1).expand(predict.size(0), 1,
predict.size(2))).squeeze()
contribution_preds = predict[:,1:].view(-1, 4)
coupling_preds = predict[:,0].view(-1)
return [coupling_preds, contribution_preds, predict_type]
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/champs-scalar-coupling/tree.md | .
├── build_data
│ ├── __init__.py
│ ├── lib
│ │ ├── include.py
│ │ ├── __init__.py
│ │ ├── net
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__
│ │ │ └── rate.py
│ │ └── utility
│ │ ├── draw.py
│ │ ├── file.py
│ │ ├── __init__.py
│ ├── common.py
│ ├── data-cudf.py
│ ├── data.py
│ ├── parallel_process.py
│ ├── atom_features.py
│ ├── baseline_node_frame_from_csv.ipynb
│ ├── build_baseline_dataframes.ipynb
│ ├── build_preds_from_checkpoints.ipynb
│ ├── build_stack_train_validation.ipynb
│ ├── build_train_validation.ipynb
│ ├── build_train_validation_rnn.ipynb
│ ├── build_train_validation_rnn_per_type.ipynb
│ ├── build_train_validation_rnn_scalar.ipynb
├── experiments
│ ├── \*.yaml
├── merge_predictions_per_type.ipynb
├── models
│ ├── \*.pth
├── mpnn_model
│ ├── __init__.py
│ ├── lib
│ │ ├── include.py
│ │ ├── __init__.py
│ │ ├── net
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__
│ │ │ └── rate.py
│ │ └── utility
│ │ ├── draw.py
│ │ ├── file.py
│ │ ├── __init__.py
│ ├── build_predictions.py
│ ├── callback.py
│ ├── common_constants.py
│ ├── common_model.py
│ ├── common.py
│ ├── data_collate.py
│ ├── data.py
│ ├── dataset.py
│ ├── GaussRank.py
│ ├── helpers.py
│ ├── message_passing.py
│ ├── model.py
│ ├── parallel_process.py
│ ├── radam.py
│ ├── regression_head.py
│ ├── RNN_attention.py
│ └── train_loss.py
├── pre_trained_models
│ ├── \*.pth
├── scripts
│ ├── bootsrap_train_mpnn_rnn.py
│ ├── train_mpnn.py
│ ├── train_mpnn_rnn.py
│ └── train_type.py
├── train_MPNN_RNN.ipynb
├── train_MPNN_RNN_SINGLE_TYPE.ipynb
├── save_pretrained_single_models.ipynb
├── README.md
└── tree.md | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/champs-scalar-coupling/README.md | Placeholder for our 33rd place solution.
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/champs-scalar-coupling/train_MPNN_RNN_BOOTSTRAP.ipynb | import os
GPU_id = 2
os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_id)
from fastai.basic_train import *
from fastai.callbacks import SaveModelCallback
from functools import partial
import cudf as gd
import warnings
import glob
import gzip
from torch.utils.dlpack import from_dlpack
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.dataset import TensorBatchDataset, BatchDataBunch, BatchDataLoader
from mpnn_model.data_collate import tensor_collate_rnn
from mpnn_model.GaussRank import GaussRankMap
from mpnn_model.helpers import load_cfg
from mpnn_model.model import Net
from mpnn_model.train_loss import train_criterion, lmae_criterion
from mpnn_model.callback import get_reverse_frame, lmae, LMAE
from mpnn_model.radam import *
from mpnn_model.build_predictions import do_test
from mpnn_model.helpers import *
warnings.filterwarnings("ignore")
# load config dict
cfg = load_cfg('/rapids/notebooks/srabhi/champs-2019/CherKeng_solution/fastai_code/experiments/MPNN_RNN_PREDICT_TYPE_LMAE_GAUSSRANK_BOOTSTRAP.yaml')
COUPLING_MAX = 136
fold = 1
print('\n Load GaussRank mapping for fold %s' %fold)
data_dir = DATA_DIR + '/rnn_parquet'
files = glob.glob(data_dir+'/fold_%s/'%fold+'*.csv')
mapping_frames = ['']*8
coupling_order = ['']*8
for file in files:
type_ = file.split('/')[-1].split('_')[2]
order = int(file.split('/')[-1].split('_')[-1].strip('.csv'))
coupling_order[order] = type_
mapping_frames[order] = pd.read_csv(file)
grm = GaussRankMap(mapping_frames, coupling_order)
from time import time
def build_test_data(fold, grm, coupling_frame, molecule_edge, molecule_node):
# Transform test predictions to gaussrank
df_test = coupling_frame[['coupling_type', 'scalar_coupling_constant']]
df_test.columns = ['type', 'scalar_coupling_constant']
# Reverse type mapping
df_test.type = df_test.type.map(REVERSE_COUPLING_TYPE)
#fit grm
t0 = time()
transformed_test = grm.convert_df(df_test, from_coupling=True)
coupling_frame['gaussrank_coupling'] = transformed_test
print('\nGetting gaussrank transformation for test data took %s seconds\n' %(time()-t0))
# Build molecule coupling frame for fold
coupling_cols = ['atom_index_0', 'atom_index_1', 'coupling_type', 'scalar_coupling_constant', 'gaussrank_coupling',
'fc', 'sd', 'pso', 'dso', 'id',
'path_index_0', 'path_index_1', 'path_index_2','path_index_3',
'path_btype_0', 'path_btype_1', 'path_btype_2',
'path_a_num_0', 'path_a_num_1', 'path_a_num_2', 'path_a_num_3']
shared_cols = ['molecule_name', 'num_coupling', 'coupling_dim']
tmp = coupling_frame.groupby('molecule_name').apply(lambda x: x[coupling_cols].values.reshape(-1))
molecule_coupling = pd.DataFrame(tmp.values.tolist())
# pad coupling_max from 132 to 136
COUPLING_MAX = 136
pad_cols = 21*5
d = dict.fromkeys([str(i) for i in range(molecule_coupling.shape[1], molecule_coupling.shape[1]+pad_cols)], 0.0)
molecule_coupling = molecule_coupling.assign(**d).fillna(0.0)
molecule_coupling['molecule_name'] = tmp.index
molecule_coupling = molecule_coupling.merge(coupling_frame[shared_cols].drop_duplicates(), on='molecule_name', how='left')
cols = molecule_coupling.columns.tolist()
new_cols = cols[-3:] + cols[:-3]
molecule_coupling = molecule_coupling[new_cols]
molecule_coupling.columns = ['molecule_name', 'num_coupling', 'coupling_dim'] + ['coupling_%s'%i for i in range(COUPLING_MAX*21)]
node_edge_frame = pd.merge(molecule_node, molecule_edge, on='molecule_name', how='left')
general_stack_frame = pd.merge(node_edge_frame, molecule_coupling, on='molecule_name', how='left')
    return general_stack_frame
DATA_DIR = cfg['dataset']['input_path']
best_pred_file_path = '/datasets/trivago/champs-2019/output/sub_2019-08-27-17-20-58_lmae_-2.3194.csv.gz'
fold = 1
%%time
print('get test pseudo labels')
test= pd.read_csv(DATA_DIR+'/csv/test.csv')
id_test = test.id.values
mol_test = test.molecule_name.values
molecule_edge = pd.read_parquet(DATA_DIR+'/parquet/molecule_edge.parquet')
molecule_edge = molecule_edge[molecule_edge.molecule_name.isin(mol_test)]
molecule_node = pd.read_parquet(DATA_DIR+'/parquet/molecule_node.parquet')
molecule_node = molecule_node[molecule_node.molecule_name.isin(mol_test)]
coupling_frame = pd.read_parquet(DATA_DIR+'/parquet/baseline_rnn_coupling_frame.parquet')
coupling_frame = coupling_frame[coupling_frame.molecule_name.isin(mol_test)]
with gzip.open(best_pred_file_path) as f:
best_stack_test = pd.read_csv(f)
coupling_frame = coupling_frame.merge(best_stack_test, on = 'id', how='left')
test_frame = build_test_data(fold, grm, coupling_frame, molecule_edge, molecule_node)
print('\n Load Train/Validation features for fold %s' %fold)
validation = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/validation.parquet'%fold)
train = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/train.parquet' %fold)
# transform pandas to cudf
print('\n Define new train with test observations' )
test = gd.from_pandas(test_frame)
train = gd.concat([train, test])
del test
batch_size = cfg['train']['batch_size']
num_nodes_tensor = from_dlpack(train['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(train['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(train['num_coupling'].to_dlpack()).long()
node_cols = [i for i in train.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(train[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in train.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(train[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in train.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(train[coupling_cols].to_dlpack()).type(torch.float32)
mol_train = train.molecule_name.unique().to_pandas().values
train_dataset = TensorBatchDataset(mol_train,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del train
# convert validation to tensors
print('** Convert validation tensors **\n')
num_nodes_tensor = from_dlpack(validation['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(validation['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(validation['num_coupling'].to_dlpack()).long()
node_cols = [i for i in validation.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(validation[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in validation.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(validation[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in validation.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(validation[coupling_cols].to_dlpack()).type(torch.float32)
mol_valid = validation.molecule_name.unique().to_pandas().values
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del validation
data = BatchDataBunch.create(train_dataset, valid_dataset, device='cuda', bs=batch_size)
net = Net(cfg, y_range=[-36.2186, 204.8800])
#### Init Fastai learner
loss_name = cfg['train']['loss_name']
num_output = cfg['model']['regression']['num_output']
predict_type = cfg['model']['regression']['predict_type']
gaussrank = cfg['dataset']['gaussrank']
print('\tCriterion: %s\n'%(loss_name))
### Get GaussRank mapping
print('\n Load GaussRank mapping')
data_dir = DATA_DIR + '/rnn_parquet'
normalize = cfg['dataset']['normalize']
files = glob.glob(data_dir+'/fold_%s/'%fold+'*.csv')
mapping_frames = ['']*8
coupling_order = ['']*8
for file in files:
type_ = file.split('/')[-1].split('_')[2]
order = int(file.split('/')[-1].split('_')[-1].strip('.csv'))
coupling_order[order] = type_
mapping_frames[order] = pd.read_csv(file)
grm = GaussRankMap(mapping_frames, coupling_order)
optal = partial(RAdam)
learn = Learner(data,
net.cuda(),
metrics=None,
opt_func=optal,
callback_fns=partial(LMAE,
grm=grm,
predict_type=predict_type,
normalize_coupling=normalize,
coupling_rank=gaussrank))
learn.loss_func = partial(train_criterion,
criterion=loss_name,
num_output=num_output,
gaussrank=gaussrank,
pred_type=predict_type)
print('\tTraining loss: %s\n'%(learn.loss_func))
#### fit one cycle
epochs = cfg['train']['epochs']
max_lr = cfg['train']['max_lr']
learn.fit_one_cycle(1,
0.005,
callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s'%fold,
                                                 mode='min')])
torch.cuda.empty_cache()
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='train')
valid_loader = BatchDataLoader(valid_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
valid_dataset.get_total_samples()
print('compute the validation predictions ')
valid_loss, reverse_frame, contributions, molecule_representation = do_test(learn.model,
valid_loader,
valid_dataset.total_samples,
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
print('\n')
print('|------------------------------------ VALID ------------------------------------------------|\n')
print('| 1JHC, 2JHC, 3JHC, 1JHN, 2JHN, 3JHN, 2JHH, 3JHH | loss mae log_mae | fold |\n')
print('|-------------------------------------------------------------------------------------------|\n')
print('|%+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f | %+5.3f %5.2f %+0.2f | %s |\n' %(*valid_loss[:11], fold))
DATA_DIR = cfg['dataset']['input_path']
batch_size = cfg['train']['batch_size']
print('load test data')
torch.cuda.empty_cache()
test = gd.read_parquet(DATA_DIR +'/rnn_parquet/test.parquet')
num_nodes_tensor = from_dlpack(test['num_nodes'].to_dlpack())
num_edges_tensor = from_dlpack(test['num_edge'].to_dlpack())
num_coupling_tensor = from_dlpack(test['num_coupling'].to_dlpack())
node_cols = [i for i in test.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(test[node_cols].to_dlpack())
nodes_matrix = from_dlpack(test[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in test.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(test[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in test.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(test[coupling_cols].to_dlpack()).type(torch.float32)
mol_test = test.molecule_name.unique().to_pandas().values
del test
test_dataset = TensorBatchDataset(mol_test,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='test')
test_loader = BatchDataLoader(test_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
print('\n Compute predictions for test data at fold %s\n' %fold)
test_loss, preds_fold_test, contributions, molecule_representation = do_test(learn.model,
test_loader,
cfg['train']['test_shape'],
1,
predict_type,
grm,
normalize=False,
                                                                              gaussrank=gaussrank)
val_loss = valid_loss[-1]
print('\n Save Validation frame' )
out_dir = '/rapids/notebooks/srabhi/champs-2019/output'
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/submit/scalar_output/cv_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
reverse_frame.to_csv(output_name, index=False, compression='gzip')
# save test predictions
print('\n Save Test frame' )
out_dir = cfg['dataset']['output_path']
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/submit/scalar_output/sub_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
preds_fold_test.to_csv(output_name, index=False,compression='gzip') | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/champs-scalar-coupling/train_MPNN.ipynb | import os
GPU_id = 0
os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_id)
from fastai.basic_train import *
from fastai.callbacks import SaveModelCallback
from functools import partial
from torch.utils.dlpack import from_dlpack
import cudf as gd
import warnings
import glob
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.dataset import TensorBatchDataset, BatchDataBunch, BatchDataLoader
from mpnn_model.data_collate import tensor_collate_baseline
from mpnn_model.GaussRank import GaussRankMap
from mpnn_model.helpers import load_cfg
from mpnn_model.model import Net
from mpnn_model.train_loss import train_criterion, lmae_criterion
from mpnn_model.callback import get_reverse_frame, lmae, LMAE
from mpnn_model.radam import *
from mpnn_model.build_predictions import do_test
from mpnn_model.helpers import *
warnings.filterwarnings("ignore")
# load config dict
cfg = load_cfg('/rapids/notebooks/srabhi/champs-2019/CherKeng_solution/fastai_code/experiments/MPNN_EMBED_TYPE_LMAE_WO_GAUSSRANK.yaml')
COUPLING_MAX = 136
DATA_DIR = cfg['dataset']['input_path']
fold = 1
%%time
validation = gd.read_parquet(DATA_DIR +'/parquet/fold_%s/validation.parquet'%fold)
train = gd.read_parquet(DATA_DIR +'/parquet/fold_%s/train.parquet' %fold)
batch_size = cfg['train']['batch_size']
num_nodes_tensor = from_dlpack(train['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(train['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(train['num_coupling'].to_dlpack()).long()
node_cols = [i for i in train.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(train[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in train.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(train[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in train.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(train[coupling_cols].to_dlpack()).type(torch.float32)
mol_train = train.molecule_name.unique().to_pandas().values
train_dataset = TensorBatchDataset(mol_train,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_baseline,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del train
# convert validation to tensors
print('** Convert validation tensors **\n')
num_nodes_tensor = from_dlpack(validation['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(validation['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(validation['num_coupling'].to_dlpack()).long()
node_cols = [i for i in validation.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(validation[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in validation.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(validation[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in validation.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(validation[coupling_cols].to_dlpack()).type(torch.float32)
mol_valid = validation.molecule_name.unique().to_pandas().values
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_baseline,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del validation
data = BatchDataBunch.create(train_dataset, valid_dataset, device='cuda', bs=batch_size)
net = Net(cfg, y_range=[-36.2186, 204.8800])
#### Init Fastai learner
loss_name = cfg['train']['loss_name']
num_output = cfg['model']['regression']['num_output']
predict_type = cfg['model']['regression']['predict_type']
gaussrank = cfg['dataset']['gaussrank']
print('\tCriterion: %s\n'%(loss_name))
### Get GaussRank mapping
print('\n Load GaussRank mapping')
data_dir = DATA_DIR + '/rnn_parquet'
normalize = cfg['dataset']['normalize']
files = glob.glob(data_dir+'/fold_%s/'%fold+'*.csv')
mapping_frames = ['']*8
coupling_order = ['']*8
for file in files:
type_ = file.split('/')[-1].split('_')[2]
order = int(file.split('/')[-1].split('_')[-1].strip('.csv'))
coupling_order[order] = type_
mapping_frames[order] = pd.read_csv(file)
grm = GaussRankMap(mapping_frames, coupling_order)
optal = partial(RAdam)
learn = Learner(data,
net.cuda(),
metrics=None,
opt_func=optal,
callback_fns=partial(LMAE,
grm=grm,
predict_type=predict_type,
normalize_coupling=normalize,
coupling_rank=gaussrank))
learn.loss_func = partial(train_criterion,
criterion=loss_name,
num_output=num_output,
gaussrank=gaussrank,
pred_type=predict_type)
print('\tTraining loss: %s\n'%(learn.loss_func))
#### fit one cycle
epochs = cfg['train']['epochs']
max_lr = cfg['train']['max_lr']
learn.fit_one_cycle(1,
0.005,
callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s'%fold,
                                                 mode='min')])
torch.cuda.empty_cache()
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_baseline,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='train')
valid_loader = BatchDataLoader(valid_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
valid_dataset.get_total_samples()
print('compute the validation predictions ')
valid_loss, reverse_frame, contributions, molecule_representation = do_test(learn.model,
valid_loader,
valid_dataset.total_samples,
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
print('\n')
print('|------------------------------------ VALID ------------------------------------------------|\n')
print('| 1JHC, 2JHC, 3JHC, 1JHN, 2JHN, 3JHN, 2JHH, 3JHH | loss mae log_mae | fold |\n')
print('|-------------------------------------------------------------------------------------------|\n')
print('|%+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f | %+5.3f %5.2f %+0.2f | %s |\n' %(*valid_loss[:11], fold))
DATA_DIR = cfg['dataset']['input_path']
batch_size = cfg['train']['batch_size']
print('load test data')
torch.cuda.empty_cache()
test = gd.read_parquet(DATA_DIR +'/parquet/test.parquet')
num_nodes_tensor = from_dlpack(test['num_nodes'].to_dlpack())
num_edges_tensor = from_dlpack(test['num_edge'].to_dlpack())
num_coupling_tensor = from_dlpack(test['num_coupling'].to_dlpack())
node_cols = [i for i in test.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(test[node_cols].to_dlpack())
nodes_matrix = from_dlpack(test[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in test.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(test[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in test.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(test[coupling_cols].to_dlpack()).type(torch.float32)
mol_test = test.molecule_name.unique().to_pandas().values
del test
test_dataset = TensorBatchDataset(mol_test,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_baseline,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='test')
test_loader = BatchDataLoader(test_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
print('\n Compute predictions for test data at fold %s\n' %fold)
test_loss, preds_fold_test, contributions, molecule_representation = do_test(learn.model,
test_loader,
cfg['train']['test_shape'],
1,
predict_type,
grm,
normalize=False,
                                                                              gaussrank=gaussrank)
val_loss = valid_loss[-1]
print('\n Save Validation frame' )
out_dir = '/rapids/notebooks/srabhi/champs-2019/output'
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/submit/scalar_output/cv_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
reverse_frame.to_csv(output_name, index=False, compression='gzip')
# save test predictions
print('\n Save Test frame' )
out_dir = cfg['dataset']['output_path']
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/submit/scalar_output/sub_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
preds_fold_test.to_csv(output_name, index=False,compression='gzip') | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/champs-scalar-coupling/save_pretrained_single_models.ipynb | import os
GPU_id = 1
os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_id)
from torch_scatter import *
from torch_geometric.utils import scatter_
import torch
import torch.nn as nn
import numbers
import torch
from torch import _utils
from fastai.torch_core import to_device
import torch.nn.functional as F
from fastai.basic_data import DataBunch
from fastai.basic_data import *
from fastai.tabular import *
from fastai import *
import copy
import warnings
warnings.filterwarnings("ignore")
from mpnn_model.common import *
from mpnn_model.helpers import load_cfg
from mpnn_model.model import *
from mpnn_model.common_constants import *
DATA_DIR = '/rapids/notebooks/srabhi/champs-2019/input'
model_dict = { '1JHC': 'lmae', '2JHC': 'lmae', '3JHC': 'lmae', '3JHH': 'lmae',
               '1JHN': 'mlmae' , '2JHN':'mlmae' , '3JHN':'mlmae', '2JHH':'mlmae'}
# to modify with the resulting model of self_training
best_all_type_model_lmae = './models/mpnn_gauss_rank_LMAE_2CE_RNN_NODE_TYPE_ATOMIC_SEQ_BOOTSTRAP_V2_'
best_all_type_model_mlmae = './models/mpnn_gauss_rank_MLMAE_2CE_RNN_NODE_TYPE_ATOMIC_SEQ_BOOTSTRAP_V2_'
def save_model(fold, coupling_type, loss_criterion='lmae'):
cfg = load_cfg('./experiments/MPNN_RNN_MAE_WO_GAUSSRANK_SINGLE_TYPE.yaml')
if loss_criterion == 'lmae':
pretrained_dict = torch.load(best_all_type_model_lmae+'_fold_%s_final_save.pth'%fold).state_dict()
else:
        pretrained_dict = torch.load(best_all_type_model_mlmae+'_fold_%s_final_save.pth'%fold).state_dict()
ind = COUPLING_TYPE.index(coupling_type)
net= Net(cfg, y_range=[COUPLING_MIN_[ind], COUPLING_MAX_[ind]])
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict and not k.startswith('predict')}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
net.load_state_dict(pretrained_dict, strict=False)
# save model
    torch.save(net, 'pre_trained_models/' + 'coupling_%s_%s_fold_%s_wo_gaussrank.pth'%(coupling_type, loss_criterion, fold))
for fold in range(4):
for type_, criterion in model_dict.items():
        save_model(fold, type_, criterion)
def save_model(fold, coupling_type, loss_criterion='lmae'):
cfg = load_cfg('/rapids/notebooks/srabhi/champs-2019/CherKeng_solution/fastai_code/experiments/MPNN_RNN_MAE_GAUSSRANK_SINGLE_TYPE.yaml')
if loss_criterion == 'lmae':
pretrained_dict = torch.load('models/mpnn_gauss_rank_LMAE_2CE_RNN_NODE_TYPE_ATOMIC_SEQ_BOOTSTRAP_V2__fold_%s_final_save.pth'%fold).state_dict()
else:
pretrained_dict = torch.load('models/mpnn_gauss_rank_MLMAE_2CE_RNN_NODE_TYPE_ATOMIC_SEQ_BOOTSTRAP_V2__fold_%s_final_save.pth'%fold).state_dict()
ind = COUPLING_TYPE.index(coupling_type)
net= Net(cfg, y_range=[-2.326753765513524, 2.3267537655135464])
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict and not k.startswith('predict')}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
net.load_state_dict(pretrained_dict, strict=False)
# save model
    torch.save(net, 'pre_trained_models/' + 'coupling_%s_%s_fold_%s_gaussrank.pth'%(coupling_type, loss_criterion, fold))
for fold in range(4):
for type_, criterion in model_dict.items():
save_model(fold, type_, criterion) | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/champs-scalar-coupling/train_MPNN_RNN_SINGLE_TYPE.ipynb | import os
GPU_id = 0
os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_id)
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.dataset import TensorBatchDataset, BatchDataBunch, BatchDataLoader
from mpnn_model.data_collate import tensor_collate_rnn
from mpnn_model.GaussRank import GaussRankMap
from mpnn_model.helpers import load_cfg
from mpnn_model.model import Net
from mpnn_model.train_loss import train_criterion, lmae_criterion
from mpnn_model.callback import get_reverse_frame, lmae, LMAE
from mpnn_model.radam import *
from mpnn_model.build_predictions import do_test
from mpnn_model.helpers import *
# Fast ai
from fastai.tabular import *
from fastai.basic_data import DataBunch
from fastai.basic_data import *
from fastai.callbacks import SaveModelCallback
from fastai import *
import cudf as gd
import numpy as np
import pandas as pd
from torch.utils.dlpack import from_dlpack
import torch
from torch import _utils
from fastai.torch_core import to_device
import torch.nn.functional as F
from timeit import default_timer as timer
from datetime import datetime
from time import time
from functools import partial
import glob
import warnings
warnings.filterwarnings("ignore")
from scripts.train_type import *
from mpnn_model.common_constants import COUPLING_TYPEmodel_dict = { '1JHC': 'lmae', '2JHC': 'lmae', '3JHC': 'lmae', '3JHH': 'lmae',
'1JHN': 'mlmae' , '2JHN':'mlmae' , '3JHN':'mlmae', '2JHH':'mlmae'}COUPLING_TYPENUM_TARGET = 1 cfg ='/rapids/notebooks/srabhi/champs-2019/CherKeng_solution/fastai_code/experiments/MPNN_RNN_MAE_WO_GAUSSRANK_SINGLE_TYPE.yaml'
fold = 1
type_='3JHH'
COUPLING_MAX = COUPLING_MAX_DICT[type_]cfg = load_cfg(cfg)
DATA_DIR = cfg['dataset']['input_path']
normalize = cfg['dataset']['normalize']
gaussrank= cfg['dataset']['gaussrank']
model_name = cfg['train']['model_name']
model_name = model_name+ '_fold_%s' %fold
batch_size = cfg['train']['batch_size']
predict_type = cfg['train']['predict_type']
loss_name = cfg['train']['loss_name']
predict_type = cfg['model']['regression']['predict_type']
epochs = cfg['train']['epochs']
max_lr = cfg['train']['max_lr']
device = cfg['train']['device']
%%time
test= pd.read_csv(DATA_DIR+'/csv/test.csv')
id_test = test.id.values
mol_test = test.molecule_name.values
print('\n Load Train/Validation features for fold %s' %fold)
validation = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/%s/validation.parquet'%(fold, type_))
train = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/%s/train.parquet' %(fold, type_))
print('\n Get In-memory Tensor ')
# Convert train to tensors
num_nodes_tensor = from_dlpack(train['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(train['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(train['num_coupling'].to_dlpack()).long()
node_cols = [i for i in train.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(train[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in train.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(train[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in train.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(train[coupling_cols].to_dlpack()).type(torch.float32)
mol_train = train.molecule_name.unique().to_pandas().values
train_dataset = TensorBatchDataset(mol_train,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
# convert validation to tensors
num_nodes_tensor = from_dlpack(validation['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(validation['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(validation['num_coupling'].to_dlpack()).long()
node_cols = [i for i in validation.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(validation[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in validation.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(validation[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in validation.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(validation[coupling_cols].to_dlpack()).type(torch.float32)
mol_valid = validation.molecule_name.unique().to_pandas().values
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del train
del validation
data = BatchDataBunch.create(train_dataset, valid_dataset, device=device, bs=batch_size)

pretrain_model = model_dict[type_]
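# freeze_cycle / unfreeze_cycle set how many one-cycle runs are done with the pretrained backbone
# frozen and then fully unfrozen; the warm-start checkpoint is chosen by the criterion
# (lmae vs mlmae) recorded for this coupling type in model_dict.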
freeze_cycle = 1
unfreeze_cycle = 1

if not gaussrank:
net = torch.load('pre_trained_models/coupling_%s_%s_fold_%s_wo_gaussrank.pth'%(type_, pretrain_model, fold))
else:
net = torch.load('pre_trained_models/coupling_%s_%s_fold_%s_gaussrank.pth'%(type_, pretrain_model, fold))
# load grm :
data_dir = DATA_DIR + '/rnn_parquet'
file = glob.glob(data_dir+'/fold_%s/'%fold+'%s/*.csv'%type_)[0]
coupling_order = [type_]
mapping_frames = [pd.read_csv(file)]
grm = GaussRankMap(mapping_frames, coupling_order)
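# GaussRankMap wraps the per-type mapping CSV between raw scalar couplings and their rank-gaussian
# transform; it is passed to the LMAE callback and to do_test so predictions can be mapped back to
# the original coupling scale.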
############################------------- Fine tune training ---------------################################
optal = partial(RAdam)
learn = Learner(data,
net,
metrics=None,
opt_func=optal,
callback_fns=partial(LMAE,
grm=grm,
predict_type=predict_type,
normalize_coupling=normalize,
coupling_rank=gaussrank))
learn.loss_func = lmae_criterion
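# Split the network into three layer groups (message-passing backbone, RNN attention, dense head)
# so that learn.freeze() first trains only the last group, before unfreezing everything.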
learn.split([[learn.model.preprocess,learn.model.message_function, learn.model.update_function, learn.model.readout],
[learn.model.rnn_attention],[learn.model.dense_layer, learn.model.predict]])
learn.lr_range(slice(1e-3))
learn.freeze()
learn.fit_one_cycle(freeze_cycle, callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s_frozen_type_%s_'%(fold, type_),
mode='min')])
learn.unfreeze()
learn.fit_one_cycle(unfreeze_cycle, max_lr=max_lr, callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s_pretrained_%s_'%(fold, type_),
                                                                 mode='min')])

valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='train')
valid_loader = BatchDataLoader(valid_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
print('\n Compute predictions for validation data at fold %s\n' %fold)
valid_loss, reverse_frame, contributions, molecule_representation = do_test(learn.model,
valid_loader,
1,
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
val_loss = valid_loss[-3]
print('\nValidation loss is : %s' %val_loss)
print('\nSave model to disk')
torch.save(learn.model, 'models/' + cfg['train']['model_name'] + '_fold_%s_final_save.pth'%fold)
print('load test data')
torch.cuda.empty_cache()
test = gd.read_parquet(DATA_DIR +'/rnn_parquet/test_%s.parquet'%type_)
num_nodes_tensor = from_dlpack(test['num_nodes'].to_dlpack())
num_edges_tensor = from_dlpack(test['num_edge'].to_dlpack())
num_coupling_tensor = from_dlpack(test['num_coupling'].to_dlpack())
node_cols = [i for i in test.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(test[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in test.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(test[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in test.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(test[coupling_cols].to_dlpack()).type(torch.float32)
mol_test = test.molecule_name.unique().to_pandas().values
#batch_node, batch_edge, batch_coupling, batch_graussrank, batch_num_node, batch_num_edge, batch_num_coupling
del test
test_dataset = TensorBatchDataset(mol_test,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='test')
test_loader = BatchDataLoader(test_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
print('\n Compute predictions for test data at fold %s\n' %fold)
test_loss, preds_fold_test, contributions, molecule_representation = do_test(learn.model,
                                                                              test_loader,
1,
1,
predict_type,
grm,
normalize=normalize,
                                                                              gaussrank=gaussrank)

reverse_frame.head(4)

OUT_DIR = cfg['dataset']['output_path']
num_output = cfg['model']['regression']['num_output']
if num_output == 1:
out_dir = OUT_DIR + '/submit/scalar_output/'
    # init predictions arrays
pred_cv = np.zeros( cfg['train']['train_shape'])
pred_sub = np.zeros(cfg['train']['test_shape'])
elif num_output == 5:
out_dir = OUT_DIR + '/submit/multi_output/'
pred_cv = np.zeros((cfg['train']['train_shape'], 5))
    pred_sub = np.zeros((cfg['train']['test_shape'], 5))

print('\n Save Validation frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/cv_%s_%s_%.4f_type_%s_fold_%s.csv.gz'%(clock, pretrain_model, val_loss, type_, fold)
reverse_frame.to_csv(output_name, index=False,compression='gzip')
# save test predictions
print('\n Save Test frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/sub_%s_%s_%.4f_type_%s_fold_%s.csv.gz'%(clock, pretrain_model, val_loss, type_, fold)
preds_fold_test.to_csv(output_name, index=False,compression='gzip')
net=None
torch.cuda.empty_cache() | 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/champs-scalar-coupling/README_old.md | Placeholder for our 33rd place solution.
| 0 |
rapidsai_public_repos/deeplearning | rapidsai_public_repos/deeplearning/champs-scalar-coupling/train_MPNN_RNN.ipynb | import os
GPU_id = 2
os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_id)
from fastai.basic_train import *
from fastai.callbacks import SaveModelCallback
from functools import partial
from torch.utils.dlpack import from_dlpack
import cudf as gd
import warnings
import glob
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.dataset import TensorBatchDataset, BatchDataBunch, BatchDataLoader
from mpnn_model.data_collate import tensor_collate_rnn
from mpnn_model.GaussRank import GaussRankMap
from mpnn_model.helpers import load_cfg
from mpnn_model.model import Net
from mpnn_model.train_loss import train_criterion, lmae_criterion
from mpnn_model.callback import get_reverse_frame, lmae, LMAE
from mpnn_model.radam import *
from mpnn_model.build_predictions import do_test
from mpnn_model.helpers import *
warnings.filterwarnings("ignore")
# load config dict
cfg = load_cfg('/rapids/notebooks/srabhi/champs-2019/CherKeng_solution/fastai_code/experiments/MPNN_RNN_PREDICT_TYPE_LMAE_WO_GAUSSRANK.yaml')

COUPLING_MAX = 136

DATA_DIR = cfg['dataset']['input_path']
fold = 1

%%time
validation = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/validation.parquet'%fold)
train = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/train.parquet' %fold)

batch_size = cfg['train']['batch_size']

num_nodes_tensor = from_dlpack(train['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(train['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(train['num_coupling'].to_dlpack()).long()
node_cols = [i for i in train.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(train[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in train.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(train[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in train.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(train[coupling_cols].to_dlpack()).type(torch.float32)
mol_train = train.molecule_name.unique().to_pandas().values
train_dataset = TensorBatchDataset(mol_train,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del train
# convert validation to tensors
print('** Convert validation tensors **\n')
num_nodes_tensor = from_dlpack(validation['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(validation['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(validation['num_coupling'].to_dlpack()).long()
node_cols = [i for i in validation.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(validation[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in validation.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(validation[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in validation.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(validation[coupling_cols].to_dlpack()).type(torch.float32)
mol_valid = validation.molecule_name.unique().to_pandas().values
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del validation

data = BatchDataBunch.create(train_dataset, valid_dataset, device='cuda', bs=batch_size)

net = Net(cfg, y_range=[-36.2186, 204.8800])

#### Init Fastai learner
loss_name = cfg['train']['loss_name']
num_output = cfg['model']['regression']['num_output']
predict_type = cfg['model']['regression']['predict_type']
gaussrank = cfg['dataset']['gaussrank']
print('\tCriterion: %s\n'%(loss_name))
### Get GaussRank mapping
print('\n Load GaussRank mapping')
data_dir = DATA_DIR + '/rnn_parquet'
normalize = cfg['dataset']['normalize']
files = glob.glob(data_dir+'/fold_%s/'%fold+'*.csv')
mapping_frames = ['']*8
coupling_order = ['']*8
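# The mapping filenames are assumed to encode the coupling type as the third '_'-separated token and
# the type's column order as the last token before '.csv'; the loop below rebuilds the per-type
# frames in that order so GaussRankMap sees them consistently across folds.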
for file in files:
type_ = file.split('/')[-1].split('_')[2]
order = int(file.split('/')[-1].split('_')[-1].strip('.csv'))
coupling_order[order] = type_
mapping_frames[order] = pd.read_csv(file)
grm = GaussRankMap(mapping_frames, coupling_order)
optal = partial(RAdam)
learn = Learner(data,
net.cuda(),
metrics=None,
opt_func=optal,
callback_fns=partial(LMAE,
grm=grm,
predict_type=predict_type,
normalize_coupling=normalize,
coupling_rank=gaussrank))
learn.loss_func = partial(train_criterion,
criterion=loss_name,
num_output=num_output,
gaussrank=gaussrank,
pred_type=predict_type)
print('\tTraining loss: %s\n'%(learn.loss_func))
#### fit one cycle
epochs = cfg['train']['epochs']
max_lr = cfg['train']['max_lr']

learn.fit_one_cycle(1,
0.005,
callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s'%fold,
                                                 mode='min')])

torch.cuda.empty_cache()

valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='train')
valid_loader = BatchDataLoader(valid_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
valid_dataset.get_total_samples()
print('compute the validation predictions ')
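# do_test runs inference over the loader and returns the per-type losses printed below, together
# with a frame of predictions mapped back to the original coupling scale (plus contributions and
# per-molecule representations).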
valid_loss, reverse_frame, contributions, molecule_representation = do_test(learn.model,
valid_loader,
valid_dataset.total_samples,
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
print('\n')
print('|------------------------------------ VALID ------------------------------------------------|\n')
print('| 1JHC, 2JHC, 3JHC, 1JHN, 2JHN, 3JHN, 2JHH, 3JHH | loss mae log_mae | fold |\n')
print('|-------------------------------------------------------------------------------------------|\n')
print('|%+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f | %+5.3f %5.2f %+0.2f | %s |\n' %(*valid_loss[:11], fold))

DATA_DIR = cfg['dataset']['input_path']
batch_size = cfg['train']['batch_size']

print('load test data')
torch.cuda.empty_cache()
test = gd.read_parquet(DATA_DIR +'/rnn_parquet/test.parquet')
num_nodes_tensor = from_dlpack(test['num_nodes'].to_dlpack())
num_edges_tensor = from_dlpack(test['num_edge'].to_dlpack())
num_coupling_tensor = from_dlpack(test['num_coupling'].to_dlpack())
node_cols = [i for i in test.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(test[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in test.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(test[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in test.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(test[coupling_cols].to_dlpack()).type(torch.float32)
mol_test = test.molecule_name.unique().to_pandas().values
del test
test_dataset = TensorBatchDataset(mol_test,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='test')
test_loader = BatchDataLoader(test_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
print('\n Compute predictions for test data at fold %s\n' %fold)
test_loss, preds_fold_test, contributions, molecule_representation = do_test(learn.model,
test_loader,
cfg['train']['test_shape'],
1,
predict_type,
grm,
normalize=False,
                                                                             gaussrank=gaussrank)

val_loss = valid_loss[-1]
print('\n Save Validation frame' )
out_dir = '/rapids/notebooks/srabhi/champs-2019/output'
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/submit/scalar_output/cv_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
reverse_frame.to_csv(output_name, index=False, compression='gzip')

# save test predictions
print('\n Save Test frame' )
out_dir = cfg['dataset']['output_path']
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/submit/scalar_output/sub_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
preds_fold_test.to_csv(output_name, index=False,compression='gzip') | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/scripts/train_mpnn.py | #############################################################################################################
# #
# Run a training process for model: MPNN #
# #
#############################################################################################################
def run_train(yaml_filepath, fold):
'''
Run training for specific fold
Arguments: path to config file of the experiment
Saves :
- Train log file
- test predictions gzip file
- cv predictions gzip file
'''
cfg = load_cfg(yaml_filepath)
############################------------- Set Train flags ---------------################################
num_output = cfg['model']['regression']['num_output']
OUT_DIR = cfg['dataset']['output_path']
if num_output == 1:
out_dir = OUT_DIR + '/submit/scalar_output/'
        # init predictions arrays
pred_cv = np.zeros( cfg['train']['train_shape'])
pred_sub = np.zeros(cfg['train']['test_shape'])
elif num_output == 5:
out_dir = OUT_DIR + '/submit/multi_output/'
pred_cv = np.zeros((cfg['train']['train_shape'], 5))
pred_sub = np.zeros((cfg['train']['test_shape'], 5))
DATA_DIR = cfg['dataset']['input_path']
normalize = cfg['dataset']['normalize']
gaussrank= cfg['dataset']['gaussrank']
COUPLING_MAX = 136
model_name = cfg['train']['model_name']
model_name = model_name+ '_fold_%s' %fold
batch_size = cfg['train']['batch_size']
predict_type = cfg['train']['predict_type']
loss_name = cfg['train']['loss_name']
predict_type = cfg['model']['regression']['predict_type']
epochs = cfg['train']['epochs']
max_lr = cfg['train']['max_lr']
device = cfg['train']['device']
y_range=cfg['model']['y_range']
############################------------- Init Log file ---------------################################
log = Logger()
log.open(out_dir+'/train/log.train.%s.%s.txt' % (cfg['train']['model_name'], fold), mode='a')
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\t%s\n' % COMMON_STRING)
log.write('\n')
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tout_dir = %s\n' % out_dir)
log.write('\tconfig file = %s\n ' % yaml_filepath)
log.write('\n')
############################------------- Load Datasets ---------------################################
log.write('** dataset setting **\n')
log.write('** load parquet data for fold %s **\n' %fold)
validation = gd.read_parquet(DATA_DIR +'/parquet/fold_%s/validation.parquet'%fold)
train = gd.read_parquet(DATA_DIR +'/parquet/fold_%s/train.parquet' %fold)
# convert tensors
log.write('** Convert train tensors **\n')
num_nodes_tensor = from_dlpack(train['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(train['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(train['num_coupling'].to_dlpack()).long()
node_cols = [i for i in train.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(train[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in train.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(train[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in train.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(train[coupling_cols].to_dlpack()).type(torch.float32)
mol_train = train.molecule_name.unique().to_pandas().values
train_dataset = TensorBatchDataset(mol_train,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_baseline,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del train
# convert validation to tensors
log.write('** Convert validation tensors **\n')
num_nodes_tensor = from_dlpack(validation['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(validation['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(validation['num_coupling'].to_dlpack()).long()
node_cols = [i for i in validation.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(validation[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in validation.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(validation[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in validation.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(validation[coupling_cols].to_dlpack()).type(torch.float32)
mol_valid = validation.molecule_name.unique().to_pandas().values
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_baseline,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del validation
### log dataset info
log.write('batch_size = %d\n'%(batch_size))
log.write('train_dataset : \n%s\n'%(train_dataset))
log.write('valid_dataset : \n%s\n'%(valid_dataset))
log.write('\n')
data = BatchDataBunch.create(train_dataset, valid_dataset, device=device, bs=batch_size)
############################------------- Fastai Learner ---------------################################
log.write('** net setting **\n')
#### Init Fastai learner
net = Net(cfg, y_range=y_range)
log.write('\tCriterion: %s\n'%(loss_name))
### Get GaussRank mapping
log.write('\n Load GaussRank mapping')
data_dir = DATA_DIR + '/parquet'
normalize = cfg['dataset']['normalize']
files = glob.glob(data_dir+'/fold_%s/'%fold+'*.csv')
mapping_frames = ['']*8
coupling_order = ['']*8
for file in files:
type_ = file.split('/')[-1].split('_')[2]
order = int(file.split('/')[-1].split('_')[-1].strip('.csv'))
coupling_order[order] = type_
mapping_frames[order] = pd.read_csv(file)
grm = GaussRankMap(mapping_frames, coupling_order)
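    # Build the fastai Learner: RAdam optimiser, the LMAE callback (given the GaussRank map so the
    # monitored metric is computed on the original coupling scale), and a training loss selected by
    # name through train_criterion.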
optal = partial(RAdam)
learn = Learner(data,
net.cuda(),
metrics=None,
opt_func=optal,
callback_fns=partial(LMAE,
grm=grm,
predict_type=predict_type,
normalize_coupling=normalize,
coupling_rank=gaussrank))
learn.loss_func = partial(train_criterion,
criterion=loss_name,
num_output=num_output,
gaussrank=gaussrank,
pred_type=predict_type)
log.write('\tTraining loss: %s\n'%(learn.loss_func))
log.write('\tfit one cycle of length: %s\n'%epochs)
learn.fit_one_cycle(epochs,
max_lr,
callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s'%fold,
mode='min')])
log.write('\nGet Validation loader\n')
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_baseline,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='train')
valid_loader = BatchDataLoader(valid_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
valid_dataset.get_total_samples()
log.write('\n Compute predictions for validation data at fold %s\n' %fold)
valid_loss, reverse_frame, contributions, molecule_representation = do_test(learn.model,
valid_loader,
valid_dataset.total_samples,
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
print('\n')
print('|------------------------------------ VALID ------------------------------------------------|\n')
print('| 1JHC, 2JHC, 3JHC, 1JHN, 2JHN, 3JHN, 2JHH, 3JHH | loss mae log_mae | fold |\n')
print('|-------------------------------------------------------------------------------------------|\n')
print('|%+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f | %+5.3f %5.2f %+0.2f | %s |\n' %(*valid_loss[:11], fold))
log.write('\n|%+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f | %+5.3f %5.2f %+0.2f | %s |\n' %(*valid_loss[:11], fold))
log.write('\nSave model to disk')
torch.save(learn.model, 'models/' + cfg['train']['model_name'] + '_fold_%s_final_save.pth'%fold)
del nodes_matrix
del edges_matrix
del coupling_matrix
torch.cuda.empty_cache()
log.write('load test data')
test = gd.read_parquet(DATA_DIR +'/parquet/test.parquet')
num_nodes_tensor = from_dlpack(test['num_nodes'].to_dlpack())
num_edges_tensor = from_dlpack(test['num_edge'].to_dlpack())
num_coupling_tensor = from_dlpack(test['num_coupling'].to_dlpack())
node_cols = [i for i in test.columns if re.compile("^node_[0-9]+").findall(i)]
    nodes_matrix = from_dlpack(test[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in test.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(test[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in test.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(test[coupling_cols].to_dlpack()).type(torch.float32)
mol_test = test.molecule_name.unique().to_pandas().values
del test
test_dataset = TensorBatchDataset(mol_test,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_baseline,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='test')
test_loader = BatchDataLoader(test_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
log.write('\n Compute predictions for test data at fold %s\n' %fold)
test_loss, preds_fold_test, contributions, molecule_representation = do_test(learn.model,
                                                                                 test_loader,
cfg['train']['test_shape'],
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
log.write('\n Save predictions to disk')
val_loss = valid_loss[-1]
log.write('\n Save Validation frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/cv_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
reverse_frame.to_csv(output_name, index=False,compression='gzip')
# save test predictions
log.write('\n Save Test frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/sub_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
preds_fold_test.to_csv(output_name, index=False,compression='gzip')
def get_parser():
"""Get parser object."""
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--file",
dest="filename",
help="experiment definition file",
metavar="FILE",
required=True)
parser.add_argument('--fold', type=int, help='fold id for cv training', required=True)
parser.add_argument('--GPU_id', type=int, help='gpu to use for training', required=True)
parser.add_argument('--best_pred_file', type=str, help='path to best prediction file', required=False)
return parser
#############################################################################################################
# #
# Main function #
# #
#############################################################################################################
if __name__ == '__main__':
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
args = get_parser().parse_args()
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.GPU_id)
import sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
import cudf as gd
from fastai.basic_train import *
from fastai.callbacks import SaveModelCallback
from functools import partial
from torch.utils.dlpack import from_dlpack
import glob
import warnings
from mpnn_model.build_predictions import do_test
from mpnn_model.callback import get_reverse_frame, lmae, LMAE
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.dataset import TensorBatchDataset, BatchDataBunch, BatchDataLoader
from mpnn_model.data_collate import tensor_collate_baseline
from mpnn_model.GaussRank import GaussRankMap
from mpnn_model.helpers import load_cfg
from mpnn_model.model import Net
from mpnn_model.radam import *
from mpnn_model.train_loss import train_criterion, lmae_criterion
print( '%s: calling main function ... ' % os.path.basename(__file__))
run_train(args.filename, args.fold)
print('\nsuccess!')
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/scripts/train_mpnn_rnn.py | #############################################################################################################
# #
# Run a training process for model: MPNN+RNN #
# #
#############################################################################################################
def run_train(yaml_filepath, fold):
'''
Run training for specific fold
Arguments: path to config file of the experiment
Saves :
- Train log file
- test predictions gzip file
- cv predictions gzip file
'''
cfg = load_cfg(yaml_filepath)
############################------------- Set Train flags ---------------################################
num_output = cfg['model']['regression']['num_output']
OUT_DIR = cfg['dataset']['output_path']
if num_output == 1:
out_dir = OUT_DIR + '/submit/scalar_output/'
        # init predictions arrays
pred_cv = np.zeros( cfg['train']['train_shape'])
pred_sub = np.zeros(cfg['train']['test_shape'])
elif num_output == 5:
out_dir = OUT_DIR + '/submit/multi_output/'
pred_cv = np.zeros((cfg['train']['train_shape'], 5))
pred_sub = np.zeros((cfg['train']['test_shape'], 5))
DATA_DIR = cfg['dataset']['input_path']
normalize = cfg['dataset']['normalize']
gaussrank= cfg['dataset']['gaussrank']
COUPLING_MAX = 136
model_name = cfg['train']['model_name']
model_name = model_name+ '_fold_%s' %fold
batch_size = cfg['train']['batch_size']
predict_type = cfg['train']['predict_type']
loss_name = cfg['train']['loss_name']
predict_type = cfg['model']['regression']['predict_type']
epochs = cfg['train']['epochs']
max_lr = cfg['train']['max_lr']
device = cfg['train']['device']
y_range=cfg['model']['y_range']
############################------------- Init Log file ---------------################################
log = Logger()
log.open(out_dir+'/train/log.train.%s.%s.txt' % (cfg['train']['model_name'], fold), mode='a')
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\t%s\n' % COMMON_STRING)
log.write('\n')
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tout_dir = %s\n' % out_dir)
log.write('\tconfig file = %s\n ' % yaml_filepath)
log.write('\n')
############################------------- Load Datasets ---------------################################
log.write('** dataset setting **\n')
log.write('** load parquet data for fold %s **\n' %fold)
validation = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/validation.parquet'%fold)
train = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/train.parquet' %fold)
# convert tensors
log.write('** Convert train tensors **\n')
num_nodes_tensor = from_dlpack(train['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(train['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(train['num_coupling'].to_dlpack()).long()
node_cols = [i for i in train.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(train[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in train.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(train[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in train.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(train[coupling_cols].to_dlpack()).type(torch.float32)
mol_train = train.molecule_name.unique().to_pandas().values
train_dataset = TensorBatchDataset(mol_train,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del train
# convert validation to tensors
log.write('** Convert validation tensors **\n')
num_nodes_tensor = from_dlpack(validation['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(validation['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(validation['num_coupling'].to_dlpack()).long()
node_cols = [i for i in validation.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(validation[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in validation.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(validation[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in validation.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(validation[coupling_cols].to_dlpack()).type(torch.float32)
mol_valid = validation.molecule_name.unique().to_pandas().values
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del validation
### log dataset info
log.write('batch_size = %d\n'%(batch_size))
log.write('train_dataset : \n%s\n'%(train_dataset))
log.write('valid_dataset : \n%s\n'%(valid_dataset))
log.write('\n')
data = BatchDataBunch.create(train_dataset, valid_dataset, device=device, bs=batch_size)
############################------------- Fastai Learner ---------------################################
log.write('** net setting **\n')
#### Init Fastai learner
net = Net(cfg, y_range=y_range)
log.write('\tCriterion: %s\n'%(loss_name))
### Get GaussRank mapping
log.write('\n Load GaussRank mapping')
data_dir = DATA_DIR + '/rnn_parquet'
normalize = cfg['dataset']['normalize']
files = glob.glob(data_dir+'/fold_%s/'%fold+'*.csv')
mapping_frames = ['']*8
coupling_order = ['']*8
for file in files:
type_ = file.split('/')[-1].split('_')[2]
order = int(file.split('/')[-1].split('_')[-1].strip('.csv'))
coupling_order[order] = type_
mapping_frames[order] = pd.read_csv(file)
grm = GaussRankMap(mapping_frames, coupling_order)
optal = partial(RAdam)
learn = Learner(data,
net.cuda(),
metrics=None,
opt_func=optal,
callback_fns=partial(LMAE,
grm=grm,
predict_type=predict_type,
normalize_coupling=normalize,
coupling_rank=gaussrank))
learn.loss_func = partial(train_criterion,
criterion=loss_name,
num_output=num_output,
gaussrank=gaussrank,
pred_type=predict_type)
log.write('\tTraining loss: %s\n'%(learn.loss_func))
log.write('\tfit one cycle of length: %s\n'%epochs)
learn.fit_one_cycle(epochs,
max_lr,
callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s'%fold,
mode='min')])
log.write('\nGet Validation loader\n')
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='train')
valid_loader = BatchDataLoader(valid_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
valid_dataset.get_total_samples()
log.write('\n Compute predictions for validation data at fold %s\n' %fold)
valid_loss, reverse_frame, contributions, molecule_representation = do_test(learn.model,
valid_loader,
valid_dataset.total_samples,
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
print('\n')
print('|------------------------------------ VALID ------------------------------------------------|\n')
print('| 1JHC, 2JHC, 3JHC, 1JHN, 2JHN, 3JHN, 2JHH, 3JHH | loss mae log_mae | fold |\n')
print('|-------------------------------------------------------------------------------------------|\n')
print('|%+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f | %+5.3f %5.2f %+0.2f | %s |\n' %(*valid_loss[:11], fold))
log.write('\n|%+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f | %+5.3f %5.2f %+0.2f | %s |\n' %(*valid_loss[:11], fold))
log.write('\nSave model to disk')
torch.save(learn.model, 'models/' + cfg['train']['model_name'] + '_fold_%s_final_save.pth'%fold)
del nodes_matrix
del edges_matrix
del coupling_matrix
torch.cuda.empty_cache()
log.write('load test data')
test = gd.read_parquet(DATA_DIR +'/rnn_parquet/test.parquet')
num_nodes_tensor = from_dlpack(test['num_nodes'].to_dlpack())
num_edges_tensor = from_dlpack(test['num_edge'].to_dlpack())
num_coupling_tensor = from_dlpack(test['num_coupling'].to_dlpack())
node_cols = [i for i in test.columns if re.compile("^node_[0-9]+").findall(i)]
    nodes_matrix = from_dlpack(test[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in test.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(test[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in test.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(test[coupling_cols].to_dlpack()).type(torch.float32)
mol_test = test.molecule_name.unique().to_pandas().values
del test
test_dataset = TensorBatchDataset(mol_test,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='test')
test_loader = BatchDataLoader(test_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
log.write('\n Compute predictions for test data at fold %s\n' %fold)
test_loss, preds_fold_test, contributions, molecule_representation = do_test(learn.model,
                                                                                 test_loader,
cfg['train']['test_shape'],
1,
predict_type,
grm,
normalize=False,
gaussrank=False)
log.write('\n Save predictions to disk')
val_loss = valid_loss[-1]
log.write('\n Save Validation frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/cv_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
reverse_frame.to_csv(output_name, index=False,compression='gzip')
# save test predictions
log.write('\n Save Test frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/sub_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
preds_fold_test.to_csv(output_name, index=False,compression='gzip')
def get_parser():
"""Get parser object."""
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--file",
dest="filename",
help="experiment definition file",
metavar="FILE",
required=True)
parser.add_argument('--fold', type=int, help='fold id for cv training', required=True)
parser.add_argument('--GPU_id', type=int, help='gpu to use for training', required=True)
parser.add_argument('--best_pred_file', type=str, help='path to best prediction file', required=False)
return parser
#############################################################################################################
# #
# Main function #
# #
#############################################################################################################
if __name__ == '__main__':
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
args = get_parser().parse_args()
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.GPU_id)
import sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
import cudf as gd
from fastai.basic_train import *
from fastai.callbacks import SaveModelCallback
from functools import partial
from torch.utils.dlpack import from_dlpack
import glob
import warnings
from mpnn_model.build_predictions import do_test
from mpnn_model.callback import get_reverse_frame, lmae, LMAE
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.dataset import TensorBatchDataset, BatchDataBunch, BatchDataLoader
from mpnn_model.data_collate import tensor_collate_rnn
from mpnn_model.GaussRank import GaussRankMap
from mpnn_model.helpers import load_cfg
from mpnn_model.model import Net
from mpnn_model.radam import *
from mpnn_model.train_loss import train_criterion, lmae_criterion
print( '%s: calling main function ... ' % os.path.basename(__file__))
run_train(args.filename, args.fold)
print('\nsuccess!')
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/scripts/bootsrap_train_mpnn_rnn.py | #############################################################################################################
# #
# helper functions #
# #
#############################################################################################################
def build_test_data(fold, grm, coupling_frame, molecule_edge, molecule_node):
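    # Bootstrapping helper: the best stacked test predictions serve as pseudo-labels. They are mapped
    # to GaussRank space with this fold's GaussRankMap, reshaped into the per-molecule coupling layout
    # (21 values per coupling, padded up to COUPLING_MAX rows), and joined with the node/edge frames
    # so the test molecules can be appended to the training set.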
# Transform test predictions to gaussrank
df_test = coupling_frame[['coupling_type', 'scalar_coupling_constant']]
df_test.columns = ['type', 'scalar_coupling_constant']
# Reverse type mapping
df_test.type = df_test.type.map(REVERSE_COUPLING_TYPE)
#fit grm
t0 = time()
transformed_test = grm.convert_df(df_test, from_coupling=True)
coupling_frame['gaussrank_coupling'] = transformed_test
print('\nGetting gaussrank transformation for test data took %s seconds\n' %(time()-t0))
# Build molecule coupling frame for fold
coupling_cols = ['atom_index_0', 'atom_index_1', 'coupling_type', 'scalar_coupling_constant', 'gaussrank_coupling',
'fc', 'sd', 'pso', 'dso', 'id',
'path_index_0', 'path_index_1', 'path_index_2','path_index_3',
'path_btype_0', 'path_btype_1', 'path_btype_2',
'path_a_num_0', 'path_a_num_1', 'path_a_num_2', 'path_a_num_3']
shared_cols = ['molecule_name', 'num_coupling', 'coupling_dim']
tmp = coupling_frame.groupby('molecule_name').apply(lambda x: x[coupling_cols].values.reshape(-1))
molecule_coupling = pd.DataFrame(tmp.values.tolist())
# pad coupling_max from 132 to 136
COUPLING_MAX = 136
pad_cols = 21*5
d = dict.fromkeys([str(i) for i in range(molecule_coupling.shape[1], molecule_coupling.shape[1]+pad_cols)], 0.0)
molecule_coupling = molecule_coupling.assign(**d).fillna(0.0)
molecule_coupling['molecule_name'] = tmp.index
molecule_coupling = molecule_coupling.merge(coupling_frame[shared_cols].drop_duplicates(), on='molecule_name', how='left')
cols = molecule_coupling.columns.tolist()
new_cols = cols[-3:] + cols[:-3]
molecule_coupling = molecule_coupling[new_cols]
molecule_coupling.columns = ['molecule_name', 'num_coupling', 'coupling_dim'] + ['coupling_%s'%i for i in range(COUPLING_MAX*21)]
node_edge_frame = pd.merge(molecule_node, molecule_edge, on='molecule_name', how='left')
general_stack_frame = pd.merge(node_edge_frame, molecule_coupling, on='molecule_name', how='left')
return general_stack_frame
#############################################################################################################
# #
# Run a training process for model: MPNN+RNN #
# #
#############################################################################################################
def run_train(yaml_filepath, fold, best_pred_file_path):
'''
Run training for specific fold
Arguments: path to config file of the experiment
Saves :
- Train log file
- test predictions gzip file
- cv predictions gzip file
'''
cfg = load_cfg(yaml_filepath)
############################------------- Set Train flags ---------------################################
num_output = cfg['model']['regression']['num_output']
OUT_DIR = cfg['dataset']['output_path']
if num_output == 1:
out_dir = OUT_DIR + '/submit/scalar_output/'
        # init predictions arrays
pred_cv = np.zeros( cfg['train']['train_shape'])
pred_sub = np.zeros(cfg['train']['test_shape'])
elif num_output == 5:
out_dir = OUT_DIR + '/submit/multi_output/'
pred_cv = np.zeros((cfg['train']['train_shape'], 5))
pred_sub = np.zeros((cfg['train']['test_shape'], 5))
DATA_DIR = cfg['dataset']['input_path']
normalize = cfg['dataset']['normalize']
gaussrank= cfg['dataset']['gaussrank']
COUPLING_MAX = 136
model_name = cfg['train']['model_name']
model_name = model_name+ '_fold_%s' %fold
batch_size = cfg['train']['batch_size']
predict_type = cfg['train']['predict_type']
loss_name = cfg['train']['loss_name']
predict_type = cfg['model']['regression']['predict_type']
epochs = cfg['train']['epochs']
max_lr = cfg['train']['max_lr']
device = cfg['train']['device']
y_range=cfg['model']['y_range']
############################------------- Init Log file ---------------################################
log = Logger()
log.open(out_dir+'/train/log.train.%s.%s.txt' % (cfg['train']['model_name'], fold), mode='a')
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\t%s\n' % COMMON_STRING)
log.write('\n')
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tout_dir = %s\n' % out_dir)
log.write('\tconfig file = %s\n ' % yaml_filepath)
log.write('\n')
############################----------- Load GRM transformer -------------################################
log.write('\n Load GaussRank mapping for fold %s' %fold)
data_dir = DATA_DIR + '/rnn_parquet'
files = glob.glob(data_dir+'/fold_%s/'%fold+'*.csv')
mapping_frames = ['']*8
coupling_order = ['']*8
for file in files:
type_ = file.split('/')[-1].split('_')[2]
order = int(file.split('/')[-1].split('_')[-1].strip('.csv'))
coupling_order[order] = type_
mapping_frames[order] = pd.read_csv(file)
grm = GaussRankMap(mapping_frames, coupling_order)
############################------------- Load Datasets ---------------################################
log.write('** dataset setting **\n')
COUPLING_MAX = 136
log.write('\nBuild test features for fold %s' %fold)
test= pd.read_csv(DATA_DIR+'/csv/test.csv')
id_test = test.id.values
mol_test = test.molecule_name.values
molecule_edge = pd.read_parquet(DATA_DIR+'/parquet/molecule_edge.parquet')
molecule_edge = molecule_edge[molecule_edge.molecule_name.isin(mol_test)]
molecule_node = pd.read_parquet(DATA_DIR+'/parquet/molecule_node.parquet')
molecule_node = molecule_node[molecule_node.molecule_name.isin(mol_test)]
coupling_frame = pd.read_parquet(DATA_DIR+'/parquet/baseline_rnn_coupling_frame.parquet')
coupling_frame = coupling_frame[coupling_frame.molecule_name.isin(mol_test)]
with gzip.open(best_pred_file_path) as f:
best_stack_test = pd.read_csv(f)
coupling_frame = coupling_frame.merge(best_stack_test, on = 'id', how='left')
test_frame = build_test_data(fold, grm, coupling_frame, molecule_edge, molecule_node)
log.write('\n Load Train/Validation features for fold %s' %fold)
validation = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/validation.parquet'%fold)
train = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/train.parquet' %fold)
# transform pandas to cudf
log.write('\n Define new train with test observations' )
test = gd.from_pandas(test_frame)
train = gd.concat([train, test])
del test
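    # The pseudo-labelled test molecules are appended to the real training molecules for this fold;
    # the validation split is left untouched so LMAE is still measured on held-out real labels.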
# convert tensors
log.write('** Convert train tensors **\n')
num_nodes_tensor = from_dlpack(train['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(train['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(train['num_coupling'].to_dlpack()).long()
node_cols = [i for i in train.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(train[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in train.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(train[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in train.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(train[coupling_cols].to_dlpack()).type(torch.float32)
mol_train = train.molecule_name.unique().to_pandas().values
train_dataset = TensorBatchDataset(mol_train,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del train
# convert validation to tensors
log.write('** Convert validation tensors **\n')
num_nodes_tensor = from_dlpack(validation['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(validation['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(validation['num_coupling'].to_dlpack()).long()
node_cols = [i for i in validation.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(validation[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in validation.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(validation[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in validation.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(validation[coupling_cols].to_dlpack()).type(torch.float32)
mol_valid = validation.molecule_name.unique().to_pandas().values
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del validation
### log dataset info
log.write('batch_size = %d\n'%(batch_size))
log.write('train_dataset : \n%s\n'%(train_dataset))
log.write('valid_dataset : \n%s\n'%(valid_dataset))
log.write('\n')
data = BatchDataBunch.create(train_dataset, valid_dataset, device=device, bs=batch_size)
############################------------- Fastai Learner ---------------################################
log.write('** net setting **\n')
#### Init Fastai learner
net = Net(cfg, y_range=y_range)
log.write('\tCriterion: %s\n'%(loss_name))
optal = partial(RAdam)
learn = Learner(data,
net.cuda(),
metrics=None,
opt_func=optal,
callback_fns=partial(LMAE,
grm=grm,
predict_type=predict_type,
normalize_coupling=normalize,
coupling_rank=gaussrank))
learn.loss_func = partial(train_criterion,
criterion=loss_name,
num_output=num_output,
gaussrank=gaussrank,
pred_type=predict_type)
log.write('\tTraining loss: %s\n'%(learn.loss_func))
log.write('\tfit one cycle of length: %s\n'%epochs)
learn.fit_one_cycle(epochs,
max_lr,
callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s'%fold,
mode='min')])
log.write('\nGet Validation loader\n')
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='train')
valid_loader = BatchDataLoader(valid_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device=device)
valid_dataset.get_total_samples()
log.write('\n Compute predictions for validation data at fold %s\n' %fold)
valid_loss, reverse_frame, contributions, molecule_representation = do_test(learn.model,
valid_loader,
valid_dataset.total_samples,
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
print('\n')
print('|------------------------------------ VALID ------------------------------------------------|\n')
print('| 1JHC, 2JHC, 3JHC, 1JHN, 2JHN, 3JHN, 2JHH, 3JHH | loss mae log_mae | fold |\n')
print('|-------------------------------------------------------------------------------------------|\n')
print('|%+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f | %+5.3f %5.2f %+0.2f | %s |\n' %(*valid_loss[:11], fold))
log.write('\n|%+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f, %+0.3f | %+5.3f %5.2f %+0.2f | %s |\n' %(*valid_loss[:11], fold))
log.write('\nSave model to disk')
torch.save(learn.model, 'models/' + cfg['train']['model_name'] + '_fold_%s_final_save.pth'%fold)
del nodes_matrix
del edges_matrix
del coupling_matrix
torch.cuda.empty_cache()
log.write('load test data')
test = gd.read_parquet(DATA_DIR +'/rnn_parquet/test.parquet')
num_nodes_tensor = from_dlpack(test['num_nodes'].to_dlpack())
num_edges_tensor = from_dlpack(test['num_edge'].to_dlpack())
num_coupling_tensor = from_dlpack(test['num_coupling'].to_dlpack())
node_cols = [i for i in test.columns if re.compile("^node_[0-9]+").findall(i)]
    nodes_matrix = from_dlpack(test[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in test.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(test[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in test.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(test[coupling_cols].to_dlpack()).type(torch.float32)
mol_test = test.molecule_name.unique().to_pandas().values
del test
test_dataset = TensorBatchDataset(mol_test,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='test')
test_loader = BatchDataLoader(test_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
log.write('\n Compute predictions for test data at fold %s\n' %fold)
test_loss, preds_fold_test, contributions, molecule_representation = do_test(learn.model,
                                                                                 test_loader,
cfg['train']['test_shape'],
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
log.write('\n Save predictions to disk')
val_loss = valid_loss[-1]
log.write('\n Save Validation frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/cv_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
reverse_frame.to_csv(output_name, index=False,compression='gzip')
# save test predictions
log.write('\n Save Test frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/sub_%s_%s_%.4f_fold_%s.csv.gz'%(clock, loss_name, val_loss, fold)
preds_fold_test.to_csv(output_name, index=False,compression='gzip')
def get_parser():
"""Get parser object."""
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--file",
dest="filename",
help="experiment definition file",
metavar="FILE",
required=True)
parser.add_argument('--fold', type=int, help='fold id for cv training', required=True)
parser.add_argument('--GPU_id', type=int, help='gpu to use for training', required=True)
parser.add_argument('--best_pred_file', type=str, help='path to best prediction file (csv.gz)', required=False)
return parser
#############################################################################################################
# #
# Main function #
# #
#############################################################################################################
if __name__ == '__main__':
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
args = get_parser().parse_args()
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.GPU_id)
import sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
import cudf as gd
from fastai.basic_train import *
from fastai.callbacks import SaveModelCallback
from functools import partial
from torch.utils.dlpack import from_dlpack
import glob
import gzip
import warnings
from mpnn_model.build_predictions import do_test
from mpnn_model.callback import get_reverse_frame, lmae, LMAE
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.dataset import TensorBatchDataset, BatchDataBunch, BatchDataLoader
from mpnn_model.data_collate import tensor_collate_rnn
from mpnn_model.GaussRank import GaussRankMap
from mpnn_model.helpers import load_cfg
from mpnn_model.model import Net
from mpnn_model.radam import *
from mpnn_model.train_loss import train_criterion, lmae_criterion
from time import time
print( '%s: calling main function ... ' % os.path.basename(__file__))
run_train(args.filename, args.fold, args.best_pred_file)
print('\nsuccess!')
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/scripts/train_type.py | # Define dataset
# coupling_cols = ['atom_index_0', 'atom_index_1','coupling_type','scalar_coupling',
# 'gaussrank_coupling','fc','sd','pso','dso','id', 'path_index_0', 'path_index_1',
# 'path_index_2', 'path_index_3', 'path_btype_0', 'path_btype_1',
#                  'path_btype_2', 'path_a_num_0', 'path_a_num_1', 'path_a_num_2', 'path_a_num_3']
#
# edge_cols : ['atom_index_0', 'atom_index_1', 'edge_type', 'distance', 'angle' ]
#
# nodes cols : ['symbol','acceptor', 'donor', 'aromatic', 'hybridization', 'num_h', 'atomic']
#
###################################################
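# Illustrative sketch (an assumption based on the flattened parquet layout built in build_data/create_parquet.py):
# the per-molecule columns selected below with the "^node_", "^edge_" and "^coupling_" regexes can be
# viewed back as matrices inside the collate step, e.g.
#   nodes_matrix.view(-1, NODE_MAX, 7)          # 7 node features per atom
#   edges_matrix.view(-1, EDGE_MAX, 5)          # 5 edge features per edge
#   coupling_matrix.view(-1, COUPLING_MAX, 21)  # 21 values per coupling row, in the order listed above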
__all__ = ['run']
#############################################################################################################
# #
# Train run #
# #
#############################################################################################################
def run(yaml_filepath, fold, type_, freeze_cycle=4, unfreeze_cycle=40):
cfg = load_cfg(yaml_filepath)
COUPLING_MAX = COUPLING_MAX_DICT[type_]
pretrain_model = model_dict[type_]
###########################------------- Set Train flags ---------------################################
num_output = cfg['model']['regression']['num_output']
OUT_DIR = cfg['dataset']['output_path']
if num_output == 1:
out_dir = OUT_DIR + '/submit/scalar_output/'
        # init predictions arrays
pred_cv = np.zeros( cfg['train']['train_shape'])
pred_sub = np.zeros(cfg['train']['test_shape'])
elif num_output == 5:
out_dir = OUT_DIR + '/submit/multi_output/'
pred_cv = np.zeros((cfg['train']['train_shape'], 5))
pred_sub = np.zeros((cfg['train']['test_shape'], 5))
DATA_DIR = cfg['dataset']['input_path']
normalize = cfg['dataset']['normalize']
gaussrank= cfg['dataset']['gaussrank']
model_name = cfg['train']['model_name']
model_name = model_name+ '_fold_%s' %fold
batch_size = cfg['train']['batch_size']
    loss_name = cfg['train']['loss_name']
    predict_type = cfg['model']['regression']['predict_type']
epochs = cfg['train']['epochs']
max_lr = cfg['train']['max_lr']
device = cfg['train']['device']
############################------------- Init Log file ---------------################################
log = Logger()
log.open(out_dir+'/train/log.train.%s.%s.txt' % (cfg['train']['model_name'], fold), mode='a')
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\t%s\n' % COMMON_STRING)
log.write('\n')
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tout_dir = %s\n' % out_dir)
log.write('\tconfig file = %s\n ' % yaml_filepath)
log.write('\n')
log.write('\nTrain model for type %s and fold %s' %(type_, fold))
############################------------- Load Datasets ---------------################################
test= pd.read_csv(DATA_DIR+'/csv/test.csv')
id_test = test.id.values
mol_test = test.molecule_name.values
print('\n Load Train/Validation features for fold %s' %fold)
validation = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/%s/validation.parquet'%(fold, type_))
train = gd.read_parquet(DATA_DIR +'/rnn_parquet/fold_%s/%s/train.parquet' %(fold, type_))
print('\n Get In-memory Tensor ')
# Convert train to tensors
num_nodes_tensor = from_dlpack(train['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(train['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(train['num_coupling'].to_dlpack()).long()
node_cols = [i for i in train.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(train[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in train.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(train[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in train.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(train[coupling_cols].to_dlpack()).type(torch.float32)
mol_train = train.molecule_name.unique().to_pandas().values
train_dataset = TensorBatchDataset(mol_train,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
# convert validation to tensors
num_nodes_tensor = from_dlpack(validation['num_nodes'].to_dlpack()).long()
num_edges_tensor = from_dlpack(validation['num_edge'].to_dlpack()).long()
num_coupling_tensor = from_dlpack(validation['num_coupling'].to_dlpack()).long()
node_cols = [i for i in validation.columns if re.compile("^node_[0-9]+").findall(i)]
nodes_matrix = from_dlpack(validation[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in validation.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(validation[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in validation.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(validation[coupling_cols].to_dlpack()).type(torch.float32)
mol_valid = validation.molecule_name.unique().to_pandas().values
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='train',
csv='train')
del train
del validation
data = BatchDataBunch.create(train_dataset, valid_dataset, device=device, bs=batch_size)
############################------------- Load model ---------------################################
if not gaussrank:
net = torch.load('pre_trained_models/coupling_%s_%s_fold_%s_wo_gaussrank.pth'%(type_, pretrain_model, fold))
else:
net = torch.load('pre_trained_models/coupling_%s_%s_fold_%s_gaussrank.pth'%(type_, pretrain_model, fold))
# load grm :
data_dir = DATA_DIR + '/rnn_parquet'
file = glob.glob(data_dir+'/fold_%s/'%fold+'%s/*.csv'%type_)[0]
coupling_order = [type_]
mapping_frames = [pd.read_csv(file)]
grm = GaussRankMap(mapping_frames, coupling_order)
############################------------- Fine tune training ---------------################################
optal = partial(RAdam)
learn = Learner(data,
net,
metrics=None,
opt_func=optal,
callback_fns=partial(LMAE,
grm=grm,
predict_type=predict_type,
normalize_coupling=normalize,
coupling_rank=gaussrank))
learn.loss_func = lmae_criterion
learn.split([[learn.model.preprocess,learn.model.message_function, learn.model.update_function, learn.model.readout],
[learn.model.rnn_attention],[learn.model.dense_layer, learn.model.predict]])
learn.lr_range(slice(1e-3))
learn.freeze()
learn.fit_one_cycle(freeze_cycle, callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s_frozen_type_%s_'%(fold, type_),
mode='min')])
learn.unfreeze()
learn.fit_one_cycle(unfreeze_cycle, max_lr=max_lr, callbacks=[SaveModelCallback(learn,
every='improvement',
monitor='LMAE',
name=cfg['train']['model_name']+'_fold_%s_pretrained_%s_'%(fold, type_),
mode='min')])
############################------------- Build predictions ---------------################################
valid_dataset = TensorBatchDataset(mol_valid,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='train')
valid_loader = BatchDataLoader(valid_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
log.write('\n Compute predictions for validation data at fold %s\n' %fold)
valid_loss, reverse_frame, contributions, molecule_representation = do_test(learn.model,
valid_loader,
1,
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
val_loss = valid_loss[-3]
log.write('\nValidation loss is : %s' %val_loss)
log.write('\nSave model to disk')
torch.save(learn.model, 'models/' + cfg['train']['model_name'] + '_fold_%s_final_save.pth'%fold)
log.write('load test data')
torch.cuda.empty_cache()
test = gd.read_parquet(DATA_DIR +'/rnn_parquet/test_%s.parquet'%type_)
num_nodes_tensor = from_dlpack(test['num_nodes'].to_dlpack())
num_edges_tensor = from_dlpack(test['num_edge'].to_dlpack())
num_coupling_tensor = from_dlpack(test['num_coupling'].to_dlpack())
node_cols = [i for i in test.columns if re.compile("^node_[0-9]+").findall(i)]
    nodes_matrix = from_dlpack(test[node_cols].to_dlpack()).type(torch.float32)
edge_cols = [i for i in test.columns if re.compile("^edge_[0-9]+").findall(i)]
edges_matrix = from_dlpack(test[edge_cols].to_dlpack()).type(torch.float32)
coupling_cols = [i for i in test.columns if re.compile("^coupling_[0-9]+").findall(i)]
coupling_matrix = from_dlpack(test[coupling_cols].to_dlpack()).type(torch.float32)
mol_test = test.molecule_name.unique().to_pandas().values
#batch_node, batch_edge, batch_coupling, batch_graussrank, batch_num_node, batch_num_edge, batch_num_coupling
del test
test_dataset = TensorBatchDataset(mol_test,
tensors=[nodes_matrix, edges_matrix, coupling_matrix,
num_nodes_tensor, num_edges_tensor, num_coupling_tensor],
batch_size=batch_size,
collate_fn=tensor_collate_rnn,
COUPLING_MAX=COUPLING_MAX,
mode='test',
csv='test')
test_loader = BatchDataLoader(test_dataset,
shuffle=False,
pin_memory=False,
drop_last=False,
device='cuda')
log.write('\n Compute predictions for test data at fold %s\n' %fold)
test_loss, preds_fold_test, contributions, molecule_representation = do_test(learn.model,
                                                                                     test_loader,
1,
1,
predict_type,
grm,
normalize=normalize,
gaussrank=gaussrank)
# save test predictions
log.write('\n Save predictions to disk')
log.write('\n Save Validation frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/cv_%s_%s_%.4f_type_%s_fold_%s.csv.gz'%(clock, pretrain_model, val_loss, type_, fold)
reverse_frame.to_csv(output_name, index=False,compression='gzip')
# save test predictions
log.write('\n Save Test frame' )
clock = "{}".format(datetime.now()).replace(' ','-').replace(':','-').split('.')[0]
output_name = out_dir + '/sub_%s_%s_%.4f_type_%s_fold_%s.csv.gz'%(clock, pretrain_model, val_loss, type_, fold)
preds_fold_test.to_csv(output_name, index=False,compression='gzip')
net=None
torch.cuda.empty_cache()
print('\nsuccess!')
def get_parser():
"""Get parser object."""
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--file",
dest="filename",
help="experiment definition file",
metavar="FILE",
required=True)
parser.add_argument('--fold', type=int, help='fold id for cv training', required=True)
parser.add_argument('--GPU_id', type=int, help='gpu to use for training', required=True)
# parser.add_argument('--best_pred_file', type=str, help='path to best prediction file', required=False)
parser.add_argument('--type', type=str, help='coupling type', required=False)
parser.add_argument('--freeze_cycle', type=int, help='Number of iterations with frozen weights', required=False)
parser.add_argument('--unfreeze_cycle', type=int, help='Number of iterations with unfrozen weights', required=False)
return parser
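# Illustrative invocation (hypothetical config path and values; flags mirror get_parser above):
#   python scripts/train_type.py -f experiments/mpnn_rnn.yaml --fold 0 --GPU_id 0 \
#       --type 1JHC --freeze_cycle 4 --unfreeze_cycle 40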
#############################################################################################################
# #
# Main function #
# #
#############################################################################################################
if __name__ == '__main__':
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
args = get_parser().parse_args()
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.GPU_id)
import sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
import cudf as gd
from fastai.basic_train import *
from fastai.callbacks import SaveModelCallback
from functools import partial
from torch.utils.dlpack import from_dlpack
import glob
import warnings
from mpnn_model.build_predictions import do_test
from mpnn_model.callback import get_reverse_frame, lmae, LMAE
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.dataset import TensorBatchDataset, BatchDataBunch, BatchDataLoader
from mpnn_model.data_collate import tensor_collate_rnn
from mpnn_model.GaussRank import GaussRankMap
from mpnn_model.helpers import load_cfg
from mpnn_model.model import Net
from mpnn_model.radam import *
from mpnn_model.train_loss import train_criterion, lmae_criterion
print( '%s: calling main function ... ' % os.path.basename(__file__))
#cfg, fold, type_, pretrain_model, freeze_cycle=4, unfreeze_cycle=40
run(args.filename, args.fold, args.type, args.freeze_cycle, args.unfreeze_cycle)
print('\nsuccess!') | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/parallel_process.py | from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor, as_completed
"""
Credit to http://danshiebler.com
"""
def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=3):
"""
A parallel version of the map function with a progress bar.
Args:
array (array-like): An array to iterate over.
function (function): A python function to apply to the elements of array
n_jobs (int, default=16): The number of cores to use
use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of
keyword arguments to function
front_num (int, default=3): The number of iterations to run serially before kicking off the parallel job.
Useful for catching bugs
Returns:
[function(array[0]), function(array[1]), ...]
"""
    #We run the first few iterations serially to catch bugs
    if front_num > 0:
        front = [function(**a) if use_kwargs else function(a) for a in array[:front_num]]
    else:
        front = []
#If we set n_jobs to 1, just run a list comprehension. This is useful for benchmarking and debugging.
if n_jobs==1:
return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
#Assemble the workers
with ProcessPoolExecutor(max_workers=n_jobs) as pool:
#Pass the elements of array into function
if use_kwargs:
futures = [pool.submit(function, **a) for a in array[front_num:]]
else:
futures = [pool.submit(function, a) for a in array[front_num:]]
kwargs = {
'total': len(futures),
'unit': 'it',
'unit_scale': True,
'leave': True
}
#Print out the progress as tasks complete
for f in tqdm(as_completed(futures), **kwargs):
pass
out = []
#Get the results from the futures.
for i, future in tqdm(enumerate(futures)):
try:
out.append(future.result())
except Exception as e:
out.append(e)
return front + out | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/common.py | from lib.include import *
from lib.utility.draw import *
from lib.utility.file import *
from lib.net.rate import *
#---------------------------------------------------------------------------------
COMMON_STRING ='@%s: \n' % os.path.basename(__file__)
if 1:
SEED = int(time.time()) #35202 #35202 #123 #
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
COMMON_STRING += '\tset random seed\n'
COMMON_STRING += '\t\tSEED = %d\n'%SEED
    torch.backends.cudnn.benchmark = True  ## uses the inbuilt cudnn auto-tuner to find the fastest convolution algorithms
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
COMMON_STRING += '\tset cuda environment\n'
COMMON_STRING += '\t\ttorch.__version__ = %s\n'%torch.__version__
COMMON_STRING += '\t\ttorch.version.cuda = %s\n'%torch.version.cuda
COMMON_STRING += '\t\ttorch.backends.cudnn.version() = %s\n'%torch.backends.cudnn.version()
try:
COMMON_STRING += '\t\tos[\'CUDA_VISIBLE_DEVICES\'] = %s\n'%os.environ['CUDA_VISIBLE_DEVICES']
NUM_CUDA_DEVICES = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
except Exception:
COMMON_STRING += '\t\tos[\'CUDA_VISIBLE_DEVICES\'] = None\n'
NUM_CUDA_DEVICES = 1
COMMON_STRING += '\t\ttorch.cuda.device_count() = %d\n'%torch.cuda.device_count()
#print ('\t\ttorch.cuda.current_device() =', torch.cuda.current_device())
COMMON_STRING += '\n'
#---------------------------------------------------------------------------------
## useful : http://forums.fast.ai/t/model-visualization/12365/2
if __name__ == '__main__':
print (COMMON_STRING) | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/build_train_validation_rnn.ipynb | from build_predictions import *
from GaussRank import GaussRankMap
import pandas as pd
from create_parquet import *
from data import *
import warnings
warnings.filterwarnings("ignore")

DATA_DIR = '/rapids/notebooks/srabhi/champs-2019/input/'

node_frame = pd.read_csv(DATA_DIR + 'parquet/baseline_node_frame.csv')
node_frame.head(2)

node_cols = ['symbol','acceptor', 'donor', 'aromatic', 'hybridization', 'num_h', 'atomic']
shared_cols = ['molecule_name', 'num_nodes', 'node_dim']
tmp = node_frame.groupby('molecule_name').apply(lambda x: x[node_cols].values.reshape(-1))
molecule_node = pd.DataFrame(tmp.values.tolist())
#pad node max 29 to 32
pad_cols = 21
d = dict.fromkeys([str(i) for i in range(molecule_node.shape[1], molecule_node.shape[1]+pad_cols)], 0.0)
molecule_node = molecule_node.assign(**d).fillna(0.0)
molecule_node['molecule_name'] = tmp.index
molecule_node = molecule_node.merge(node_frame[shared_cols].drop_duplicates(), on='molecule_name', how='left')
cols = molecule_node.columns.tolist()
new_cols = cols[-3:] + cols[:-3]
molecule_node = molecule_node[new_cols]
molecule_node.columns = ['molecule_name', 'num_nodes', 'node_dim'] + ['node_%s'%i for i in range(NODE_MAX*7)]
molecule_node.head(2)

molecule_node.to_parquet(DATA_DIR + 'parquet/molecule_node.parquet')

edge_frame = pd.read_csv(DATA_DIR + 'parquet/baseline_edge_frame.csv')
edge_frame.head(2)

edge_cols = ['atom_index_0', 'atom_index_1', 'edge_type', 'distance', 'angle' ]
shared_cols = ['molecule_name', 'num_edge', 'edge_dim']
tmp = edge_frame.groupby('molecule_name').apply(lambda x: x[edge_cols].values.reshape(-1))
molecule_edge = pd.DataFrame(tmp.values.tolist())
#pad edge_max 812 to 816
pad_cols = 4 * 5
d = dict.fromkeys([str(i) for i in range(molecule_edge.shape[1], molecule_edge.shape[1]+pad_cols)], 0.0)
molecule_edge = molecule_edge.assign(**d).fillna(0.0)
molecule_edge['molecule_name'] = tmp.index
molecule_edge = molecule_edge.merge(edge_frame[shared_cols].drop_duplicates(), on='molecule_name', how='left')
cols = molecule_edge.columns.tolist()
new_cols = cols[-3:] + cols[:-3]
molecule_edge = molecule_edge[new_cols]
molecule_edge.columns = ['molecule_name', 'num_edge', 'edge_dim']+ ['edge_%s'%i for i in range(EDGE_MAX*5)]
molecule_edge.head(2)

molecule_edge.to_parquet(DATA_DIR + 'parquet/molecule_edge.parquet')

coupling_frame = pd.read_csv(DATA_DIR + 'parquet/baseline_coupling_frame.csv')

cols = ['molecule_name', 'num_coupling', 'coupling_dim', 'atom_index_0', 'atom_index_1', 'coupling_type', 'scalar_coupling',
        'fc', 'sd', 'pso', 'dso', 'id']
coupling_frame = coupling_frame[cols]
coupling_frame.head(2)

shortest_path_frame_train = pd.read_csv('/rapids/notebooks/srabhi/champs-2019/input/train_shortest_path.csv')
shortest_path_frame_test = pd.read_csv('/rapids/notebooks/srabhi/champs-2019/input/test_shortest_path.csv')

shortest_path_frame = pd.concat([shortest_path_frame_train, shortest_path_frame_test])

# save the overall shortest path frame
shortest_path_frame.to_csv('/rapids/notebooks/srabhi/champs-2019/input/shortest_path.csv', index=False)

cols = ['id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'path_index_0', 'path_index_1',
        'path_index_2', 'path_index_3', 'path_btype_0', 'path_btype_1',
        'path_btype_2', 'path_a_num_0', 'path_a_num_1', 'path_a_num_2',
        'path_a_num_3']
shortest_path_frame = shortest_path_frame[cols]
shortest_path_frame.head(3)

new_coupling_frame = pd.merge(coupling_frame, shortest_path_frame, on=['id', 'molecule_name', 'atom_index_0', 'atom_index_1'], how='left')
new_coupling_frame.head(3)

new_coupling_frame.columns
new_coupling_frame.molecule_name.nunique()

new_coupling_frame.to_parquet(DATA_DIR + 'parquet/baseline_rnn_coupling_frame.parquet')

from time import time
def save_cv_data(fold, coupling_frame):
print('fold: %s' %fold)
split_train = 'train_split_by_mol_hash.%s.npy'%fold
split_valid = 'valid_split_by_mol_hash.%s.npy'%fold
id_train_ = np.load(DATA_DIR + '/split/%s'%split_train,allow_pickle=True)
id_valid_ = np.load(DATA_DIR + '/split/%s'%split_valid,allow_pickle=True)
csv = 'test'
df = pd.read_csv(DATA_DIR + '/csv/%s.csv'%csv)
id_test_ = df.molecule_name.unique()
train = coupling_frame[coupling_frame.molecule_name.isin(id_train_)]
validation = coupling_frame[coupling_frame.molecule_name.isin(id_valid_)]
test = coupling_frame[coupling_frame.molecule_name.isin(id_test_)]
# Get GaussRank of coupling values
t0 = time()
grm = GaussRankMap()
df_train = train[['coupling_type', 'scalar_coupling']]
df_valid = validation[['coupling_type', 'scalar_coupling']]
df_train.columns = ['type', 'scalar_coupling_constant']
df_valid.columns = ['type', 'scalar_coupling_constant']
# Reverse type mapping
df_train.type = df_train.type.map(REVERSE_COUPLING_TYPE)
df_valid.type = df_valid.type.map(REVERSE_COUPLING_TYPE)
#fit grm
transformed_training = grm.fit_training(df_train, reset=True)
transformed_validation = grm.convert_df(df_valid, from_coupling=True)
validation['gaussrank_coupling'] = transformed_validation
train['gaussrank_coupling'] = transformed_training
print('Getting gaussrank transformation for train/validation data took %s seconds' %(time()-t0))
print(grm.coupling_order)
test['gaussrank_coupling'] = 0
general_coupling_frame = pd.concat([train, validation, test])
# Build molecule coupling frame for fold
coupling_cols = ['atom_index_0', 'atom_index_1', 'coupling_type', 'scalar_coupling', 'gaussrank_coupling',
'fc', 'sd', 'pso', 'dso', 'id',
'path_index_0', 'path_index_1', 'path_index_2','path_index_3',
'path_btype_0', 'path_btype_1', 'path_btype_2',
'path_a_num_0', 'path_a_num_1', 'path_a_num_2', 'path_a_num_3']
shared_cols = ['molecule_name', 'num_coupling', 'coupling_dim']
tmp = general_coupling_frame.groupby('molecule_name').apply(lambda x: x[coupling_cols].values.reshape(-1))
molecule_coupling = pd.DataFrame(tmp.values.tolist())
# pad coupling_max from 135 to 136
pad_cols = 21
d = dict.fromkeys([str(i) for i in range(molecule_coupling.shape[1], molecule_coupling.shape[1]+pad_cols)], 0.0)
molecule_coupling = molecule_coupling.assign(**d).fillna(0.0)
molecule_coupling['molecule_name'] = tmp.index
molecule_coupling = molecule_coupling.merge(general_coupling_frame[shared_cols].drop_duplicates(), on='molecule_name', how='left')
cols = molecule_coupling.columns.tolist()
new_cols = cols[-3:] + cols[:-3]
molecule_coupling = molecule_coupling[new_cols]
molecule_coupling.columns = ['molecule_name', 'num_coupling', 'coupling_dim'] + ['coupling_%s'%i for i in range(COUPLING_MAX*21)]
print(molecule_coupling.shape, molecule_edge.shape, molecule_node.shape)
node_edge_frame = pd.merge(molecule_node, molecule_edge, on='molecule_name', how='left')
general_stack_frame = pd.merge(node_edge_frame, molecule_coupling, on='molecule_name', how='left')
train_frame = general_stack_frame[general_stack_frame.molecule_name.isin(id_train_)]
validation_frame = general_stack_frame[general_stack_frame.molecule_name.isin(id_valid_)]
test_frame = general_stack_frame[general_stack_frame.molecule_name.isin(id_test_)]
validation_frame.to_parquet(DATA_DIR+'rnn_parquet/fold_%s'%fold+'/validation.parquet')
train_frame.to_parquet(DATA_DIR +'rnn_parquet/fold_%s'%fold+ '/train.parquet')
# save mapping
for i, (type_, frame) in enumerate(zip(grm.coupling_order, grm.training_maps)):
frame.to_csv(DATA_DIR +'rnn_parquet/fold_%s'%fold+'/mapping_type_%s_order_%s.csv'%(type_, i), index=False)
return test_frame
test_frame = save_cv_data(0, new_coupling_frame)

%%time
folds = 4
for fold in range(1,folds):
    print(coupling_frame.shape)
    test_frame = save_cv_data(fold, new_coupling_frame)

test_frame.to_parquet(DATA_DIR + '/rnn_parquet/test.parquet')

cols = ['atom_index_0', 'atom_index_1', 'coupling_type', 'scalar_coupling', 'gaussrank_coupling',
        'fc', 'sd', 'pso', 'dso', 'id',
        'path_index_0', 'path_index_1', 'path_index_2', 'path_index_3',
        'path_btype_0', 'path_btype_1', 'path_btype_2',
        'path_a_num_0', 'path_a_num_1', 'path_a_num_2', 'path_a_num_3']
cols[10:14] | 0
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/data.py | #
#
#
# This module aims to create molecule graphs from Kaggle data and rdkit
#
# It also gives the possibility to create cv folds as .npy files with molecule names
#
#
#
#####################################################################################
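# Illustrative invocations (hypothetical paths; flags mirror the argparse options in __main__ below):
#   python data.py --cv --folds 4
#   python data.py --categorical_encoding one_hot --graph_dir /champs-2019/input/structure/graph1
#   python data.py --categorical_encoding label --normalize --graph_dir /champs-2019/input/structure/graph4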
from common import *
from atom_features import *
from collections import defaultdict
import networkx as nx
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import ChemicalFeatures
from rdkit import RDConfig
import rdkit.Chem.Draw
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
DrawingOptions.bondLineWidth=1.8
from rdkit.Chem.rdmolops import SanitizeFlags
import os
from functools import partial
import argparse
import pandas as pd
import cudf as gd
import numpy as np
import scipy
from sklearn import preprocessing
# __all__ = ['make_graph', 'do_one', 'run_convert_to_graph', 'run_make_split' ]
## Helpers for feature extraction #####################################################
COUPLING_TYPE_STATS=[
#type #mean, std, min, max
'1JHC', 94.9761528641869, 18.27722399839607, 66.6008, 204.8800,
'2JHC', -0.2706244378832, 4.52360876732858, -36.2186, 42.8192,
'3JHC', 3.6884695895355, 3.07090647005439, -18.5821, 76.0437,
'1JHN', 47.4798844844683, 10.92204561670947, 24.3222, 80.4187,
'2JHN', 3.1247536134185, 3.67345877025737, -2.6209, 17.7436,
'3JHN', 0.9907298624944, 1.31538940138001, -3.1724, 10.9712,
'2JHH', -10.2866051639817, 3.97960190019757, -35.1761, 11.8542,
'3JHH', 4.7710233597359, 3.70498129755812, -3.0205, 17.4841,
]
NUM_COUPLING_TYPE = len(COUPLING_TYPE_STATS)//5
COUPLING_TYPE_MEAN = [ COUPLING_TYPE_STATS[i*5+1] for i in range(NUM_COUPLING_TYPE)]
COUPLING_TYPE_STD = [ COUPLING_TYPE_STATS[i*5+2] for i in range(NUM_COUPLING_TYPE)]
COUPLING_TYPE = [ COUPLING_TYPE_STATS[i*5 ] for i in range(NUM_COUPLING_TYPE)]
#--- Set of Categorical modalities
SYMBOL = ['H', 'C', 'N', 'O', 'F']
BOND_TYPE = [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC,
]
HYBRIDIZATION=[
#Chem.rdchem.HybridizationType.S,
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
#Chem.rdchem.HybridizationType.SP3D,
#Chem.rdchem.HybridizationType.SP3D2,
]
def one_hot_encoding(x, set):
"""
One-Hot Encode categorical variables
"""
one_hot = [int(x == s) for s in set]
if 0:
if sum(one_hot)==0: print('one_hot_encoding() return NULL!', x, set)
return one_hot
def label_encoding(x, set):
"""
Encode categorical variables to int Ids
"""
try:
return set.index(x)+1
    except ValueError:
return 0
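# Illustrative behaviour of the two encoders (derived from the definitions above):
#   one_hot_encoding('C', SYMBOL)  -> [0, 1, 0, 0, 0]
#   label_encoding('C', SYMBOL)    -> 2   # index + 1
#   label_encoding('Si', SYMBOL)   -> 0   # unknown modalities map to 0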
''' Graph Structure
node_feature :
category
(symbol,SYMBOL) #5
(acceptor,) #1
(donor, ) #1
(aromatic,) #1
one_hot_encoding(hybridization,HYBRIDIZATION) #3
real
(num_h, ) #1
(atomic, ) #1
edge_feature :
category
(bond_type,BOND_TYPE) #4
real
np.digitize(distance,DISTANCE) #1
angle #1
coupling: Structure
id:
contributions:
index:
type:
value:
'''
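# Feature dimensionalities implied by the structure above:
#   one_hot : node_dim = 5 + 1 + 1 + 1 + 3 + 1 + 1 = 13, edge_dim = 4 + 1 + 1 = 6
#   label   : node_dim = 7, edge_dim = 3 (each categorical block collapses to a single integer column)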
#############################################################################################################
# #
# Molecule graph representation #
# #
#############################################################################################################
def make_graph(molecule_name, gb_structure, gb_scalar_coupling,
categorical_encoding='one_hot', normalize_coupling=False, rank=False) :
"""
make_graph --> returns graph as 'Struct' object (see /lib/utility/file.py)
Args:
- molecule_name : (str)
- gb_structure (DataFrame GroupBy): groupby structure: data groupped by molecule name
- gb_scalar_coupling (DataFrame GroupBy): The coupling contributions data groupped by molecule name
        - categorical_encoding (str): how to represent categorical variables: 'label' vs 'one_hot' encoding
        - normalize_coupling (bool): whether to standardize coupling values with the per-type mean/std statistics
"""
#---- Coupling informatiom
# ['id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'type', 'scalar_coupling_constant', 'fc', 'sd', 'pso', 'dso'],
df = gb_scalar_coupling.get_group(molecule_name)
coupling_index = np.array([ COUPLING_TYPE.index(t) for t in df.type.values ], np.int32)
scalar_coupling_constant = df.scalar_coupling_constant.values
if normalize_coupling:
coupling_mean = np.array([COUPLING_TYPE_MEAN[x] for x in coupling_index], np.float32)
coupling_std = np.array([COUPLING_TYPE_STD[x] for x in coupling_index], np.float32)
scalar_coupling_constant = (scalar_coupling_constant - coupling_mean) / coupling_std
coupling = Struct(
id = df.id.values,
contribution = df[['fc', 'sd', 'pso', 'dso']].values,
index = df[['atom_index_0', 'atom_index_1']].values,
type = coupling_index,
value = scalar_coupling_constant,
)
#---- Molecule structure information
df = gb_structure.get_group(molecule_name)
df = df.sort_values(['atom_index'], ascending=True)
# ['molecule_name', 'atom_index', 'atom', 'x', 'y', 'z']
a = df.atom.values.tolist()
xyz = df[['x','y','z']].values
mol = mol_from_axyz(a, xyz)
#---
assert( #check
a == [ mol.GetAtomWithIdx(i).GetSymbol() for i in range(mol.GetNumAtoms())]
)
#--- Atoms information
factory = ChemicalFeatures.BuildFeatureFactory(os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef'))
feature = factory.GetFeaturesForMol(mol)
if categorical_encoding =='one_hot':
## ** node features **
num_atom = mol.GetNumAtoms()
symbol = np.zeros((num_atom,len(SYMBOL)),np.uint8) #category
acceptor = np.zeros((num_atom,1),np.uint8) #bool
donor = np.zeros((num_atom,1),np.uint8) #bool
aromatic = np.zeros((num_atom,1),np.uint8) #bool
hybridization = np.zeros((num_atom,len(HYBRIDIZATION)),np.uint8) #category
num_h = np.zeros((num_atom,1),np.float32) #real
atomic = np.zeros((num_atom,1),np.float32) #real
for i in range(num_atom):
atom = mol.GetAtomWithIdx(i)
symbol[i] = one_hot_encoding(atom.GetSymbol(),SYMBOL)
aromatic[i] = atom.GetIsAromatic()
hybridization[i] = one_hot_encoding(atom.GetHybridization(),HYBRIDIZATION)
num_h[i] = atom.GetTotalNumHs(includeNeighbors=True)
atomic[i] = atom.GetAtomicNum()
for t in range(0, len(feature)):
if feature[t].GetFamily() == 'Donor':
for i in feature[t].GetAtomIds():
donor[i] = 1
elif feature[t].GetFamily() == 'Acceptor':
for i in feature[t].GetAtomIds():
acceptor[i] = 1
## ** edge features **
num_edge = num_atom*num_atom - num_atom
edge_index = np.zeros((num_edge,2), np.uint8) # int tuples
bond_type = np.zeros((num_edge,len(BOND_TYPE)), np.uint8) #category
distance = np.zeros((num_edge,1),np.float32) #real
angle = np.zeros((num_edge,1),np.float32) #real
relative_angle = np.zeros((num_edge,1),np.float32) #real
norm_xyz = preprocessing.normalize(xyz, norm='l2')
ij=0
for i in range(num_atom):
for j in range(num_atom):
if i==j: continue
edge_index[ij] = [i,j]
bond = mol.GetBondBetweenAtoms(i, j)
if bond is not None:
bond_type[ij] = one_hot_encoding(bond.GetBondType(),BOND_TYPE)
distance[ij] = ((xyz[i] - xyz[j])**2).sum()**0.5
angle[ij] = (norm_xyz[i]*norm_xyz[j]).sum()
ij+=1
elif categorical_encoding =='label':
## ** node features **
num_atom = mol.GetNumAtoms()
symbol = np.zeros((num_atom,1),np.uint8) #category
acceptor = np.zeros((num_atom,1),np.uint8) #bool
donor = np.zeros((num_atom,1),np.uint8) #bool
aromatic = np.zeros((num_atom,1),np.uint8) #bool
hybridization = np.zeros((num_atom,1),np.uint8) #category
num_h = np.zeros((num_atom,1),np.float32) #real
atomic = np.zeros((num_atom,1),np.float32) #real
for i in range(num_atom):
atom = mol.GetAtomWithIdx(i)
symbol[i] = label_encoding(atom.GetSymbol(), SYMBOL)
aromatic[i] = atom.GetIsAromatic()
hybridization[i] = label_encoding(atom.GetHybridization(),HYBRIDIZATION)
num_h[i] = atom.GetTotalNumHs(includeNeighbors=True)
atomic[i] = atom.GetAtomicNum()
for t in range(0, len(feature)):
if feature[t].GetFamily() == 'Donor':
for i in feature[t].GetAtomIds():
donor[i] = 1
elif feature[t].GetFamily() == 'Acceptor':
for i in feature[t].GetAtomIds():
acceptor[i] = 1
## ** edge features **
num_edge = num_atom*num_atom - num_atom
edge_index = np.zeros((num_edge,2), np.uint8) # int tuples
bond_type = np.zeros((num_edge,1), np.uint8) #category
distance = np.zeros((num_edge,1),np.float32) #real
angle = np.zeros((num_edge,1),np.float32) #real
norm_xyz = preprocessing.normalize(xyz, norm='l2')
ij=0
for i in range(num_atom):
for j in range(num_atom):
if i==j: continue
edge_index[ij] = [i,j]
bond = mol.GetBondBetweenAtoms(i, j)
if bond is not None:
bond_type[ij] = label_encoding(bond.GetBondType(),BOND_TYPE)
distance[ij] = ((xyz[i] - xyz[j])**2).sum()**0.5
angle[ij] = (norm_xyz[i]*norm_xyz[j]).sum()
ij+=1
else :
raise Exception(f"""{categorical_encoding} invalid categorical labeling""")
##---- Define the graph structure
graph = Struct(
molecule_name = molecule_name,
smiles = Chem.MolToSmiles(mol),
axyz = [a,xyz],
node = [symbol, acceptor, donor, aromatic, hybridization, num_h, atomic,],
edge = [bond_type, distance, angle],
edge_index = edge_index,
coupling = coupling,
)
return graph
#############################################################################################################
# #
# Load Champs Datasets #
# #
#############################################################################################################
def read_champs_xyz(xyz_file):
line = read_list_from_file(xyz_file, comment=None)
num_atom = int(line[0])
xyz=[]
symbol=[]
for n in range(num_atom):
l = line[1+n]
l = l.replace('\t', ' ').replace(' ', ' ')
l = l.split(' ')
symbol.append(l[0])
xyz.append([float(l[1]),float(l[2]),float(l[3]),])
return symbol, xyz
def mol_from_axyz(symbol, xyz):
charged_fragments = True
quick = True
charge = 0
atom_no = get_atomicNumList(symbol)
mol = xyz2mol(atom_no, xyz, charge, charged_fragments, quick)
return mol
def load_csv():
"""
load_csv --> load the GroupBy DataFrames (Grouping by molecule names)
"""
DATA_DIR = '/rapids/notebooks/srabhi/champs-2019/input'
#structure
df_structure = pd.read_csv(DATA_DIR + '/csv/structures.csv')
#coupling
df_train = pd.read_csv(DATA_DIR + '/csv/train.csv')
df_test = pd.read_csv(DATA_DIR + '/csv/test.csv')
df_test['scalar_coupling_constant']=0
df_scalar_coupling = pd.concat([df_train,df_test])
df_scalar_coupling_contribution = pd.read_csv(DATA_DIR + '/csv/scalar_coupling_contributions.csv')
df_scalar_coupling = pd.merge(df_scalar_coupling, df_scalar_coupling_contribution,
                                  how='left', on=['molecule_name','atom_index_0','atom_index_1','type'])
gb_scalar_coupling = df_scalar_coupling.groupby('molecule_name')
gb_structure = df_structure.groupby('molecule_name')
return gb_structure, gb_scalar_coupling
#############################################################################################################
# #
# Tests check . #
# #
#############################################################################################################
def run_check_xyz():
''' check xyz files '''
xyz_dir = '/rapids/notebooks/srabhi/champs-2019/input/structures'
name =[
'dsgdb9nsd_000001',
'dsgdb9nsd_000002',
'dsgdb9nsd_000005',
'dsgdb9nsd_000007',
'dsgdb9nsd_037490',
'dsgdb9nsd_037493',
'dsgdb9nsd_037494',
]
for n in name:
xyz_file = xyz_dir + '/%s.xyz'%n
symbol, xyz = read_champs_xyz(xyz_file)
mol = mol_from_axyz(symbol, xyz)
smiles = Chem.MolToSmiles(mol)
print(n, smiles)
image = np.array(Chem.Draw.MolToImage(mol,size=(128,128)))
image_show('',image)
cv2.waitKey(0)
def run_check_graph():
''' check graph construction '''
gb_structure, gb_scalar_coupling = load_csv()
molecule_name = 'dsgdb9nsd_000001'
normalize_coupling = False
graph = make_graph(molecule_name, gb_structure, gb_scalar_coupling, normalize_coupling)
print('')
print(graph)
print('graph.molecule_name:', graph.molecule_name)
print('graph.smiles:', graph.smiles)
print('graph.node:', np.concatenate(graph.node,-1).shape)
print('graph.edge:', np.concatenate(graph.edge,-1).shape)
print('graph.edge_index:', graph.edge_index.shape)
print('-----')
print('graph.coupling.index:', graph.coupling.index.shape)
print('graph.coupling.type:', graph.coupling.type.shape)
print('graph.coupling.value:', graph.coupling.value.shape)
print('graph.coupling.contribution:', graph.coupling.contribution.shape)
print('graph.coupling.id:', graph.coupling.id)
print('')
exit(0)
zz=0
#############################################################################################################
# #
# Build graphs #
# #
#############################################################################################################
def do_one(p, categorical_encoding='one_hot', normalize_coupling=False):
''' Create and save the graph of molecule name: p '''
i, molecule_name, gb_structure, gb_scalar_coupling, graph_file = p
g = make_graph(molecule_name, gb_structure, gb_scalar_coupling, categorical_encoding, normalize_coupling)
print(i, g.molecule_name, g.smiles)
write_pickle_to_file(graph_file,g)
##----
def run_convert_to_graph(categorical_encoding='one_hot', normalize_coupling = False , graph_dir='/champs-2019/input/structure/graph1'):
'''
Convert Train and Test data to graph structures and save each graph as .pkl file in graph_dir path
'''
# graph_dir = '/champs-2019/input/structure/graph1'
os.makedirs(graph_dir, exist_ok=True)
gb_structure, gb_scalar_coupling = load_csv()
molecule_names = list(gb_scalar_coupling.groups.keys())
molecule_names = np.sort(molecule_names)
param=[]
for i, molecule_name in enumerate(molecule_names):
graph_file = graph_dir + '/%s.pickle'%molecule_name
p = (i, molecule_name, gb_structure, gb_scalar_coupling, graph_file)
if i<2000:
do_one(p, categorical_encoding, normalize_coupling)
else:
param.append(p)
if 1:
pool = mp.Pool(processes=16)
pool.map(partial(do_one, categorical_encoding=categorical_encoding, normalize_coupling=normalize_coupling), param)
#############################################################################################################
# #
# Build Cross-Validation folds #
# #
#############################################################################################################
def run_make_split(folds):
'''
Methods for building cv folds: each fold is represented by two .npy files of unique molecule names in train / validation data fold
Arguments :
folds (type: int): number of validation folds
save train / valid npy files with related molecule names.
'''
split_dir = '/rapids/notebooks/srabhi/champs-2019/input/split'
csv_file = '/rapids/notebooks/srabhi/champs-2019/input/csv/train.csv'
print('Read train data')
df = gd.read_csv(csv_file)
df['molecule_name_hash'] = df['molecule_name'].data.hash()
# get unique molecules
print('Get unique molecules names')
molecule_names = df['molecule_name'].unique().to_pandas().values
molecule_names = np.sort(molecule_names)
print('Create train / validation folds')
debug_split = molecule_names[:1000]
np.save(split_dir + '/debug_split_by_mol.%d.npy'%len(debug_split), debug_split)
print(debug_split[0:5]) #'dsgdb9nsd_001679'
for fold in range(folds):
print(fold)
mask = df['molecule_name_hash']%folds==fold
tr, va = df[~mask]['molecule_name'],df[mask]['molecule_name']
train_split = tr.unique().to_pandas().values
valid_split = va.unique().to_pandas().values
np.save(split_dir + '/train_split_by_mol_hash.%d.npy'%(fold),train_split)
np.save(split_dir + '/valid_split_by_mol_hash.%d.npy'%(fold),valid_split)
pass
#############################################################################################################
# #
# main program #
# #
#############################################################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build graph and cross-validation data')
parser.add_argument('--cv', default=False, action ='store_true', help='whether to build cv npy folds or not')
parser.add_argument('--folds', type=int, help='number of validation folds', required=False)
parser.add_argument('--categorical_encoding', type=str, help='How to encode categorical values: "one_hot" vs "label"', required=False )
parser.add_argument('--graph_dir', type=str, help='output dir for saving the graph structure of all the molecules', required=False)
parser.add_argument('--normalize', default=False, action ='store_true', help='whether to normalize couplings', required=False)
args = parser.parse_args()
print( '%s: calling main function ... ' % os.path.basename(__file__))
# test the graph structure : run_check_graph()
if args.cv:
# Build cv folds
run_make_split(args.folds)
# Convert data to graphs
if args.graph_dir:
run_convert_to_graph(args.categorical_encoding, args.normalize, args.graph_dir)
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/atom_features.py | import numpy as np
from collections import defaultdict
import copy
import itertools
import networkx as nx
#External package
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import ChemicalFeatures
from rdkit import RDConfig
import rdkit.Chem.Draw
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
DrawingOptions.bondLineWidth=1.8
from rdkit.Chem.rdmolops import SanitizeFlags
BOND_TYPE = [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC,
]
HYBRIDIZATION=[
#Chem.rdchem.HybridizationType.S,
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
#Chem.rdchem.HybridizationType.SP3D,
#Chem.rdchem.HybridizationType.SP3D2,
]
## xyz to mol #############################################################
def mol_from_axyz(symbol, xyz):
charged_fragments = True
quick = True
charge = 0
atom_no = get_atomicNumList(symbol)
mol = xyz2mol(atom_no, xyz, charge, charged_fragments, quick)
return mol
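# Illustrative usage (hypothetical methane geometry, coordinates in Angstrom):
#   import numpy as np
#   symbol = ['C', 'H', 'H', 'H', 'H']
#   xyz = np.array([[ 0.00,  0.00,  0.00],
#                   [ 0.63,  0.63,  0.63],
#                   [-0.63, -0.63,  0.63],
#                   [-0.63,  0.63, -0.63],
#                   [ 0.63, -0.63, -0.63]])
#   mol = mol_from_axyz(symbol, xyz)   # RDKit Mol; Chem.MolToSmiles(mol) is expected to give 'C'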
def get_atom(atom):
ATOM = [x.strip() for x in ['h ', 'he', \
'li', 'be', 'b ', 'c ', 'n ', 'o ', 'f ', 'ne', \
'na', 'mg', 'al', 'si', 'p ', 's ', 'cl', 'ar', \
'k ', 'ca', 'sc', 'ti', 'v ', 'cr', 'mn', 'fe', 'co', 'ni', 'cu', \
'zn', 'ga', 'ge', 'as', 'se', 'br', 'kr', \
'rb', 'sr', 'y ', 'zr', 'nb', 'mo', 'tc', 'ru', 'rh', 'pd', 'ag', \
'cd', 'in', 'sn', 'sb', 'te', 'i ', 'xe', \
'cs', 'ba', 'la', 'ce', 'pr', 'nd', 'pm', 'sm', 'eu', 'gd', 'tb', 'dy', \
'ho', 'er', 'tm', 'yb', 'lu', 'hf', 'ta', 'w ', 're', 'os', 'ir', 'pt', \
'au', 'hg', 'tl', 'pb', 'bi', 'po', 'at', 'rn', \
'fr', 'ra', 'ac', 'th', 'pa', 'u ', 'np', 'pu']]
atom = atom.lower()
return ATOM.index(atom) + 1
def getUA(maxValence_list, valence_list):
UA = []
DU = []
for i, (maxValence, valence) in enumerate(zip(maxValence_list, valence_list)):
if maxValence - valence > 0:
UA.append(i)
DU.append(maxValence - valence)
return UA, DU
def get_BO(AC, UA, DU, valences, UA_pairs, quick):
BO = AC.copy()
DU_save = []
while DU_save != DU:
for i, j in UA_pairs:
BO[i, j] += 1
BO[j, i] += 1
BO_valence = list(BO.sum(axis=1))
DU_save = copy.copy(DU)
UA, DU = getUA(valences, BO_valence)
UA_pairs = get_UA_pairs(UA, AC, quick)[0]
return BO
def valences_not_too_large(BO, valences):
number_of_bonds_list = BO.sum(axis=1)
for valence, number_of_bonds in zip(valences, number_of_bonds_list):
if number_of_bonds > valence:
return False
return True
def BO_is_OK(BO, AC, charge, DU, atomic_valence_electrons, atomicNumList, charged_fragments):
Q = 0 # total charge
q_list = []
if charged_fragments:
BO_valences = list(BO.sum(axis=1))
for i, atom in enumerate(atomicNumList):
q = get_atomic_charge(atom, atomic_valence_electrons[atom], BO_valences[i])
Q += q
if atom == 6:
number_of_single_bonds_to_C = list(BO[i, :]).count(1)
if number_of_single_bonds_to_C == 2 and BO_valences[i] == 2:
Q += 1
q = 2
if number_of_single_bonds_to_C == 3 and Q + 1 < charge:
Q += 2
q = 1
if q != 0:
q_list.append(q)
if (BO - AC).sum() == sum(DU) and charge == Q and len(q_list) <= abs(charge):
return True
else:
return False
def get_atomic_charge(atom, atomic_valence_electrons, BO_valence):
if atom == 1:
charge = 1 - BO_valence
elif atom == 5:
charge = 3 - BO_valence
elif atom == 15 and BO_valence == 5:
charge = 0
elif atom == 16 and BO_valence == 6:
charge = 0
else:
charge = atomic_valence_electrons - 8 + BO_valence
return charge
def clean_charges(mol):
# this hack should not be needed any more but is kept just in case
#
rxn_smarts = ['[N+:1]=[*:2]-[C-:3]>>[N+0:1]-[*:2]=[C-0:3]',
'[N+:1]=[*:2]-[O-:3]>>[N+0:1]-[*:2]=[O-0:3]',
'[N+:1]=[*:2]-[*:3]=[*:4]-[O-:5]>>[N+0:1]-[*:2]=[*:3]-[*:4]=[O-0:5]',
'[#8:1]=[#6:2]([!-:6])[*:3]=[*:4][#6-:5]>>[*-:1][*:2]([*:6])=[*:3][*:4]=[*+0:5]',
'[O:1]=[c:2][c-:3]>>[*-:1][*:2][*+0:3]',
'[O:1]=[C:2][C-:3]>>[*-:1][*:2]=[*+0:3]']
fragments = Chem.GetMolFrags(mol, asMols=True, sanitizeFrags=False)
for i, fragment in enumerate(fragments):
for smarts in rxn_smarts:
patt = Chem.MolFromSmarts(smarts.split(">>")[0])
while fragment.HasSubstructMatch(patt):
rxn = AllChem.ReactionFromSmarts(smarts)
ps = rxn.RunReactants((fragment,))
fragment = ps[0][0]
if i == 0:
mol = fragment
else:
mol = Chem.CombineMols(mol, fragment)
return mol
def BO2mol(mol, BO_matrix, atomicNumList, atomic_valence_electrons, mol_charge, charged_fragments):
# based on code written by Paolo Toscani
l = len(BO_matrix)
l2 = len(atomicNumList)
BO_valences = list(BO_matrix.sum(axis=1))
if (l != l2):
raise RuntimeError('sizes of adjMat ({0:d}) and atomicNumList '
'{1:d} differ'.format(l, l2))
rwMol = Chem.RWMol(mol)
bondTypeDict = {
1: Chem.BondType.SINGLE,
2: Chem.BondType.DOUBLE,
3: Chem.BondType.TRIPLE
}
for i in range(l):
for j in range(i + 1, l):
bo = int(round(BO_matrix[i, j]))
if (bo == 0):
continue
bt = bondTypeDict.get(bo, Chem.BondType.SINGLE)
rwMol.AddBond(i, j, bt)
mol = rwMol.GetMol()
if charged_fragments:
mol = set_atomic_charges(mol, atomicNumList, atomic_valence_electrons, BO_valences, BO_matrix, mol_charge)
else:
mol = set_atomic_radicals(mol, atomicNumList, atomic_valence_electrons, BO_valences)
return mol
def set_atomic_charges(mol, atomicNumList, atomic_valence_electrons, BO_valences, BO_matrix, mol_charge):
q = 0
for i, atom in enumerate(atomicNumList):
a = mol.GetAtomWithIdx(i)
charge = get_atomic_charge(atom, atomic_valence_electrons[atom], BO_valences[i])
q += charge
if atom == 6:
number_of_single_bonds_to_C = list(BO_matrix[i, :]).count(1)
if number_of_single_bonds_to_C == 2 and BO_valences[i] == 2:
q += 1
charge = 0
if number_of_single_bonds_to_C == 3 and q + 1 < mol_charge:
q += 2
charge = 1
if (abs(charge) > 0):
a.SetFormalCharge(int(charge))
    # shouldn't be needed anymore but is kept just in case
# mol = clean_charges(mol)
return mol
def set_atomic_radicals(mol, atomicNumList, atomic_valence_electrons, BO_valences):
# The number of radical electrons = absolute atomic charge
for i, atom in enumerate(atomicNumList):
a = mol.GetAtomWithIdx(i)
charge = get_atomic_charge(atom, atomic_valence_electrons[atom], BO_valences[i])
if (abs(charge) > 0):
a.SetNumRadicalElectrons(abs(int(charge)))
return mol
def get_bonds(UA, AC):
bonds = []
for k, i in enumerate(UA):
for j in UA[k + 1:]:
if AC[i, j] == 1:
bonds.append(tuple(sorted([i, j])))
return bonds
def get_UA_pairs(UA, AC, quick):
bonds = get_bonds(UA, AC)
if len(bonds) == 0:
return [()]
if quick:
G = nx.Graph()
G.add_edges_from(bonds)
UA_pairs = [list(nx.max_weight_matching(G))]
return UA_pairs
max_atoms_in_combo = 0
UA_pairs = [()]
for combo in list(itertools.combinations(bonds, int(len(UA) / 2))):
flat_list = [item for sublist in combo for item in sublist]
atoms_in_combo = len(set(flat_list))
if atoms_in_combo > max_atoms_in_combo:
max_atoms_in_combo = atoms_in_combo
UA_pairs = [combo]
# if quick and max_atoms_in_combo == 2*int(len(UA)/2):
# return UA_pairs
elif atoms_in_combo == max_atoms_in_combo:
UA_pairs.append(combo)
return UA_pairs
def AC2BO(AC, atomicNumList, charge, charged_fragments, quick):
# TODO
atomic_valence = defaultdict(list)
atomic_valence[1] = [1]
atomic_valence[6] = [4]
atomic_valence[7] = [4, 3]
atomic_valence[8] = [2, 1]
atomic_valence[9] = [1]
atomic_valence[14] = [4]
atomic_valence[15] = [5, 4, 3]
atomic_valence[16] = [6, 4, 2]
atomic_valence[17] = [1]
atomic_valence[32] = [4]
atomic_valence[35] = [1]
atomic_valence[53] = [1]
atomic_valence_electrons = {}
atomic_valence_electrons[1] = 1
atomic_valence_electrons[6] = 4
atomic_valence_electrons[7] = 5
atomic_valence_electrons[8] = 6
atomic_valence_electrons[9] = 7
atomic_valence_electrons[14] = 4
atomic_valence_electrons[15] = 5
atomic_valence_electrons[16] = 6
atomic_valence_electrons[17] = 7
atomic_valence_electrons[32] = 4
atomic_valence_electrons[35] = 7
atomic_valence_electrons[53] = 7
# make a list of valences, e.g. for CO: [[4],[2,1]]
valences_list_of_lists = []
for atomicNum in atomicNumList:
valences_list_of_lists.append(atomic_valence[atomicNum])
# convert [[4],[2,1]] to [[4,2],[4,1]]
valences_list = list(itertools.product(*valences_list_of_lists))
best_BO = AC.copy()
    # implementation of algorithm shown in Figure 2
# UA: unsaturated atoms
# DU: degree of unsaturation (u matrix in Figure)
# best_BO: Bcurr in Figure
#
for valences in valences_list:
AC_valence = list(AC.sum(axis=1))
UA, DU_from_AC = getUA(valences, AC_valence)
if len(UA) == 0 and BO_is_OK(AC, AC, charge, DU_from_AC, atomic_valence_electrons, atomicNumList,
charged_fragments):
return AC, atomic_valence_electrons
UA_pairs_list = get_UA_pairs(UA, AC, quick)
for UA_pairs in UA_pairs_list:
BO = get_BO(AC, UA, DU_from_AC, valences, UA_pairs, quick)
if BO_is_OK(BO, AC, charge, DU_from_AC, atomic_valence_electrons, atomicNumList, charged_fragments):
return BO, atomic_valence_electrons
elif BO.sum() >= best_BO.sum() and valences_not_too_large(BO, valences):
best_BO = BO.copy()
return best_BO, atomic_valence_electrons
def AC2mol(mol, AC, atomicNumList, charge, charged_fragments, quick):
# convert AC matrix to bond order (BO) matrix
BO, atomic_valence_electrons = AC2BO(AC, atomicNumList, charge, charged_fragments, quick)
# add BO connectivity and charge info to mol object
mol = BO2mol(mol, BO, atomicNumList, atomic_valence_electrons, charge, charged_fragments)
return mol
def get_proto_mol(atomicNumList):
mol = Chem.MolFromSmarts("[#" + str(atomicNumList[0]) + "]")
rwMol = Chem.RWMol(mol)
for i in range(1, len(atomicNumList)):
a = Chem.Atom(atomicNumList[i])
rwMol.AddAtom(a)
mol = rwMol.GetMol()
return mol
def get_atomicNumList(atomic_symbols):
atomicNumList = []
for symbol in atomic_symbols:
atomicNumList.append(get_atom(symbol))
return atomicNumList
def xyz2AC(atomicNumList, xyz):
mol = get_proto_mol(atomicNumList)
conf = Chem.Conformer(mol.GetNumAtoms())
for i in range(mol.GetNumAtoms()):
conf.SetAtomPosition(i, xyz[i].tolist())
mol.AddConformer(conf)
dMat = Chem.Get3DDistanceMatrix(mol)
pt = Chem.GetPeriodicTable()
num_atoms = len(atomicNumList)
AC = np.zeros((num_atoms, num_atoms)).astype(int)
for i in range(num_atoms):
a_i = mol.GetAtomWithIdx(i)
Rcov_i = pt.GetRcovalent(a_i.GetAtomicNum()) * 1.30
for j in range(i + 1, num_atoms):
a_j = mol.GetAtomWithIdx(j)
Rcov_j = pt.GetRcovalent(a_j.GetAtomicNum()) * 1.30
if dMat[i, j] <= Rcov_i + Rcov_j:
AC[i, j] = 1
AC[j, i] = 1
return AC, mol
def read_xyz_file(filename):
atomic_symbols = []
xyz_coordinates = []
with open(filename, "r") as file:
for line_number, line in enumerate(file):
if line_number == 0:
num_atoms = int(line)
elif line_number == 1:
if "charge=" in line:
charge = int(line.split("=")[1])
else:
charge = 0
else:
atomic_symbol, x, y, z = line.split()
atomic_symbols.append(atomic_symbol)
xyz_coordinates.append([float(x), float(y), float(z)])
atomicNumList = get_atomicNumList(atomic_symbols)
return atomicNumList, xyz_coordinates, charge
# -----
## https://www.kaggle.com/sunhwan/using-rdkit-for-atomic-feature-and-visualization
def chiral_stereo_check(mol):
# avoid sanitization error e.g., dsgdb9nsd_037900.xyz
Chem.SanitizeMol(mol, SanitizeFlags.SANITIZE_ALL - SanitizeFlags.SANITIZE_PROPERTIES)
Chem.DetectBondStereochemistry(mol, -1)
# ignore stereochemistry for now
Chem.AssignStereochemistry(mol, flagPossibleStereoCenters=True, force=True)
Chem.AssignAtomChiralTagsFromStructure(mol, -1)
return mol
def xyz2mol(atomicNumList, xyz_coordinates, charge, charged_fragments, quick):
AC, mol = xyz2AC(atomicNumList, xyz_coordinates)
new_mol = AC2mol(mol, AC, atomicNumList, charge, charged_fragments, quick)
new_mol = chiral_stereo_check(new_mol)
return new_mol
def MolFromXYZ(filename):
charged_fragments = True
quick = True
    atomicNumList, xyz_coordinates, charge = read_xyz_file(filename)
mol = xyz2mol(atomicNumList, xyz_coordinates, charge, charged_fragments, quick)
return mol | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/baseline_node_frame_from_csv_pandas.ipynb | import cudf as gd

path = '/rapids/notebooks/srabhi/champs-2019/input/csv/'

%%time
train = gd.read_csv('%s/train.csv'%path)
test = gd.read_csv('%s/test.csv'%path)
print(train.shape,test.shape)
for col in train.columns:
if train[col].dtype!='O':
train[col] = train[col].astype('float32')
if col in test.columns:
test[col] = test[col].astype('float32')
struct = gd.read_csv('%s/structures.csv'%path)
struct.head().to_pandas()
for col in struct.columns:
if struct[col].dtype!='O':
struct[col] = struct[col].astype('float32')
print(struct.shape)

contribs = gd.read_csv('%s/scalar_coupling_contributions.csv'%path)
contribs.head().to_pandas()

train.head().to_pandas()

struct.head().to_pandas()

struct.atom.unique().to_pandas()

molecule_info = struct[struct.molecule_name=='dsgdb9nsd_000001']

mol_pandas = molecule_info.to_pandas()
atoms = mol_pandas.atom.values.tolist()
xyz = mol_pandas[['x','y','z']].values

xyz

structure_pandas = struct.to_pandas()

from atom_features import *
import os
import pandas as pd
def get_nodes_info(mol_id):
molecule_structure = gb.get_group(mol_id)
molecule_structure = molecule_structure.sort_values(['atom_index'], ascending=True)
atoms = molecule_structure.atom.values.tolist()
xyz = molecule_structure[['x','y','z']].values
molecule_rep = mol_from_axyz(atoms, xyz)
#--- Get Atoms information from molecule representation
factory = ChemicalFeatures.BuildFeatureFactory(os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef'))
feature = factory.GetFeaturesForMol(molecule_rep)
num_nodes = molecule_rep.GetNumAtoms()
#--- Get Atoms' features
feature_list = []
for i in range(num_nodes):
atom = molecule_rep.GetAtomWithIdx(i)
feature_list.append([mol_id, num_nodes, i, atom.GetSymbol(), atom.GetIsAromatic(), atom.GetHybridization(),
atom.GetTotalNumHs(), atom.GetAtomicNum()])
# Get Acceptor / Donor flags
donor = np.zeros(num_nodes)
acceptor = np.zeros(num_nodes)
for t in range(0, len(feature)):
if feature[t].GetFamily() == 'Donor':
for i in feature[t].GetAtomIds():
donor[i] = 1
elif feature[t].GetFamily() == 'Acceptor':
for i in feature[t].GetAtomIds():
acceptor[i] = 1
arr = np.concatenate([np.array(feature_list).reshape(-1, 8), donor.reshape(-1, 1), acceptor.reshape(-1, 1)], axis=1)
return pd.DataFrame(arr, columns=['molecule_name', 'num_nodes', 'atom_index', 'symbol',
'aromatic', 'hybridization', 'num_h',
                                      'atomic', 'acceptor', 'donor'])

%%time
gb = structure_pandas.groupby('molecule_name')

molecules = structure_pandas.molecule_name.unique()
get_nodes_info(molecules[1])

from parallel_process import parallel_process
nodes_info_frames = parallel_process(molecules, get_nodes_info)

nodes_baseline = pd.concat(nodes_info_frames)
nodes_baseline.head(4)

nodes_baseline.to_csv('/rapids/notebooks/srabhi/champs-2019/input/parquet/baseline_node_frame_2.csv', index=False) | 0
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/create_parquet.py | #
#
#
#
# This module aims to create a parquet file from .pkl graph files
#
# It also gives the possibility to compute the gaussrank and create
# train/validation .parquet files for each fold. Each line represents
# a molecule and its information in the following order:
# 'molecule_name',
# 'num_node', 'num_edge','num_coupling',
# 'node_dim', 'edge_dim','coupling_dim',
# 'node_0', ...., 'node_NODE_MAX*7'
# 'edge_0', ...., 'edge_EDGE_MAX*5'
# 'coupling_0', ...., 'coupling_COUPLING_MAX*9'
# 'gaussrank_0', ....., 'gaussrank_COUPLING_MAX'
#
#
#
#####################################################################################
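# Illustrative read-back (a sketch assuming the layout described above): recovering the
# per-molecule matrices from one flattened row of the general frame:
#   row = general_frame.iloc[0]
#   nodes = row[['node_%s' % i for i in range(NODE_MAX * 7)]].values.reshape(NODE_MAX, 7)
#   edges = row[['edge_%s' % i for i in range(EDGE_MAX * 5)]].values.reshape(EDGE_MAX, 5)
#   coupl = row[['coupling_%s' % i for i in range(COUPLING_MAX * 9)]].values.reshape(COUPLING_MAX, 9)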
import glob
from parallel_process import parallel_process
from data import *
from common import *
from GaussRank import *
from functools import partial
import pandas as pd
from time import time
NODE_MAX = 32
EDGE_MAX = 816
COUPLING_MAX = 136
def get_one_vector_from_graph(molecule_file):
'''
- molecule file: path to %molecule_name.pickle
Returns:
Convert the pickled graph to a padded vector with all the molecule information
'''
    molecule_name = molecule_file.split('/')[-1].replace('.pickle', '')
graph = read_pickle_from_file(molecule_file)
molecule_name = graph.molecule_name
node_feats = np.concatenate(graph.node,-1)
edge_feats = np.concatenate(graph.edge,-1)
edge_feats = np.concatenate([graph.edge_index, edge_feats], -1)
coupling = np.concatenate([graph.coupling.index, graph.coupling.type.reshape(-1, 1),
graph.coupling.value.reshape(-1,1), graph.coupling.contribution,
graph.coupling.id.reshape(-1,1)], -1)
num_node, node_dim = node_feats.shape
num_edge, edge_dim = edge_feats.shape
num_coupling, coupling_dim = coupling.shape
infor = [molecule_name, num_node, num_edge, num_coupling, node_dim, edge_dim, coupling_dim]
return infor, node_feats.reshape(num_node*node_dim), edge_feats.reshape(num_edge*edge_dim), coupling.reshape(num_coupling*coupling_dim)
def build_general_frame(graph_dir, parquet_dir='/rapids/notebooks/srabhi/champs-2019/input/parquet/'):
'''
Args:
- graph_dir to use for getting molecule information:
- graph1: one_hot encoding for categorical values + actual value of scalar coupling constant
- graph2: label encoding for cats + actual scalar coupling
- graph3: one_hot encoding for cats + normalized scalar coupling
- graph4: label encoding for cats + normalized scalar coupling
- parquet_dir:
- output directory where to store the general parquet frame
'''
files = glob.glob(graph_dir+'/*.pickle')
tabular_data = parallel_process(files, get_one_vector_from_graph)
nodes = []
infos = []
edges = []
coupling = []
for i in tabular_data:
infos.append(i[0])
nodes.append(i[1])
edges.append(i[2])
coupling.append(i[3])
info_frame, node_frame, edge_frame, coupling_frame = (pd.DataFrame(infos,columns=['molecule_name', 'num_node', 'num_edge',
'num_coupling', 'node_dim', 'edge_dim', 'coupling_dim']),
pd.DataFrame(nodes), pd.DataFrame(edges), pd.DataFrame(coupling))
### Pad each block to a multiple of 8 for GPU ops
# pad 29 nodes to node_max 32 :
pad_cols = 21
d = dict.fromkeys([str(i) for i in range(node_frame.shape[1], node_frame.shape[1]+pad_cols)], 0.0)
node_frame = node_frame.assign(**d).fillna(0.0)
# pad edge_max 812 to 816
pad_cols = 20
d = dict.fromkeys([str(i) for i in range(edge_frame.shape[1], edge_frame.shape[1]+pad_cols)], 0.0)
edge_frame = edge_frame.assign(**d).fillna(0.0)
# pad coupling_max to 136
pad_cols = 9
d = dict.fromkeys([str(i) for i in range(coupling_frame.shape[1], coupling_frame.shape[1]+pad_cols)], 0.0)
coupling_frame = coupling_frame.assign(**d).fillna(0.0)
# concat the whole frame
general_frame = pd.concat([info_frame, node_frame, edge_frame, coupling_frame], axis=1)
general_frame = general_frame.fillna(0.0)
print('Dataframe created for %s molecules' %general_frame.shape[0])
cols = ['molecule_name', 'num_node', 'num_edge', 'num_coupling', 'node_dim', 'edge_dim', 'coupling_dim'] + \
['node_%s'%i for i in range(NODE_MAX*7)] + ['edge_%s'%i for i in range(EDGE_MAX*5)] + ['coupling_%s'%i for i in range(COUPLING_MAX*9)]
general_frame.columns = cols
general_frame.to_parquet(os.path.join(parquet_dir, 'general_frame.parquet'))
def build_test_data(data, DATA_DIR = '/rapids/notebooks/srabhi/champs-2019/input'):
#data = gd.read_parquet(DATA_DIR +'/parquet/general_frame.parquet')
csv = 'test'
df = pd.read_csv(DATA_DIR + '/csv/%s.csv'%csv)
id_test = gd.DataFrame()
mol_test = df.molecule_name.unique()
id_test['molecule_name'] = mol_test
test_data = id_test.merge(data, on='molecule_name', how='left')
tmp = pd.DataFrame(np.zeros((45772, 136), dtype=float))
tmp.columns = ['gaussrank_%s'%i for i in range(136)]
tmp = gd.from_pandas(tmp)
tmp['molecule_name'] = test_data.molecule_name
test = tmp.merge(test_data, on='molecule_name', how='left')
test.to_parquet(DATA_DIR +'/parquet/test_frame.parquet')
def build_cv_ranks_parquet(data, fold, DATA_DIR = '/rapids/notebooks/srabhi/champs-2019/input'):
print(fold)
### Get data
split_train = 'train_split_by_mol_hash.%s.npy'%fold
split_valid = 'valid_split_by_mol_hash.%s.npy'%fold
id_train_ = np.load(DATA_DIR + '/split/%s'%split_train,allow_pickle=True)
id_valid_ = np.load(DATA_DIR + '/split/%s'%split_valid,allow_pickle=True)
df = pd.read_csv(DATA_DIR + '/csv/train.csv')
#data = gd.read_parquet(DATA_DIR+'/parquet/general_frame.parquet')
train = df[df.molecule_name.isin(id_train_)]
validation = df[df.molecule_name.isin(id_valid_)]
# Get GaussRank of coupling values
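# (GaussRank maps each coupling type's values onto quantiles of a standard normal through their
# ranks, so the regression target becomes roughly Gaussian per type; fit_training builds the
# per-type mapping from the training set and convert_df applies that mapping to new values.)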
t0 = time()
grm = GaussRankMap()
transformed_training = grm.fit_training(train, reset=True)
transformed_validation = grm.convert_df(validation, from_coupling=True)
validation['transformed_coupling'] = transformed_validation
train['transformed_coupling'] = transformed_training
print('Getting gaussrank transformation for train/validation data took %s seconds' %(time()-t0))
print(grm.coupling_order)
# Get the rank coupling values at the molecule level and pad coupling rank values to 136 :
validation_gaussrank = validation.groupby('molecule_name').apply(lambda x : x['transformed_coupling'].values)
train_gaussrank = train.groupby('molecule_name').apply(lambda x : x['transformed_coupling'].values)
val_ranks = pd.DataFrame(validation_gaussrank.tolist()).fillna(0.0)
num_cols = val_ranks.shape[1]
pad_cols = 136 - num_cols
d = dict.fromkeys([str(i) for i in range(num_cols, num_cols+pad_cols)], 0.0)
val_ranks = val_ranks.assign(**d)
val_ranks = val_ranks.astype(float)
val_ranks.columns = ['gaussrank_%s'%i for i in range(136)]
val_ranks['molecule_name'] = validation_gaussrank.index
train_ranks = pd.DataFrame(train_gaussrank.tolist()).fillna(0.0)
num_cols = train_ranks.shape[1]
pad_cols = 136 - num_cols
d = dict.fromkeys([str(i) for i in range(num_cols, num_cols+pad_cols)], 0.0)
train_ranks = train_ranks.assign(**d)
train_ranks = train_ranks.astype(float)
train_ranks.columns = ['gaussrank_%s'%i for i in range(136)]
train_ranks['molecule_name'] = train_gaussrank.index
# Merge with node /edge/coupling frame
id_valid = gd.DataFrame()
id_valid['molecule_name'] = id_valid_
valid_data = id_valid.merge(data, on='molecule_name', how='left').to_pandas()
validation_frame = pd.merge(valid_data, val_ranks, on='molecule_name', how='left')
# Merge with node /edge/coupling frame
id_train = gd.DataFrame()
id_train['molecule_name'] = id_train_
train_data = id_train.merge(data, on='molecule_name', how='left').to_pandas()
training_frame = pd.merge(train_data, train_ranks, on='molecule_name', how='left')
# Save parquet files for fold
parquet_dir = DATA_DIR + '/parquet/fold_%s' %fold
if not os.path.exists(parquet_dir):
os.makedirs(parquet_dir)
training_frame.to_parquet(parquet_dir+'/train.parquet')
validation_frame.to_parquet(parquet_dir+'/validation.parquet')
# save mapping
for i, (type_, frame) in enumerate(zip(grm.coupling_order, grm.training_maps)):
frame.to_csv(parquet_dir+'/mapping_type_%s_order_%s.csv'%(type_, i), index=False)
pass
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/build_train_validation.ipynb | #from build_predictions import *
#from GaussRank import GaussRankMap
import pandas as pd
#from create_parquet import *
#from data import *
import warnings
warnings.filterwarnings("ignore") DATA_DIR='/rapids/notebooks/srabhi/champs-2019/input/'node_frame = pd.read_csv(DATA_DIR+'parquet/baseline_node_frame.csv')node_frame.head(2)node_cols = ['symbol','acceptor', 'donor', 'aromatic', 'hybridization', 'num_h', 'atomic']
shared_cols = ['molecule_name', 'num_nodes', 'node_dim']
tmp = node_frame.groupby('molecule_name').apply(lambda x: x[node_cols].values.reshape(-1))
molecule_node = pd.DataFrame(tmp.values.tolist())
#pad node max 29 to 32
pad_cols = 21
d = dict.fromkeys([str(i) for i in range(molecule_node.shape[1], molecule_node.shape[1]+pad_cols)], 0.0)
molecule_node = molecule_node.assign(**d).fillna(0.0)
molecule_node['molecule_name'] = tmp.index
molecule_node = molecule_node.merge(node_frame[shared_cols].drop_duplicates(), on='molecule_name', how='left')
cols = molecule_node.columns.tolist()
new_cols = cols[-3:] + cols[:-3]
molecule_node = molecule_node[new_cols]
molecule_node.columns = ['molecule_name', 'num_nodes', 'node_dim'] + ['node_%s'%i for i in range(NODE_MAX*7)]
molecule_node.head(2)
edge_frame = pd.read_csv(DATA_DIR+'parquet/baseline_edge_frame.csv')
edge_frame.head(2)
edge_cols = ['atom_index_0', 'atom_index_1', 'edge_type', 'distance', 'angle' ]
shared_cols = ['molecule_name', 'num_edge', 'edge_dim']
tmp = edge_frame.groupby('molecule_name').apply(lambda x: x[edge_cols].values.reshape(-1))
molecule_edge = pd.DataFrame(tmp.values.tolist())
#pad edge_max 812 to 816
pad_cols = 4 * 5
d = dict.fromkeys([str(i) for i in range(molecule_edge.shape[1], molecule_edge.shape[1]+pad_cols)], 0.0)
molecule_edge = molecule_edge.assign(**d).fillna(0.0)
molecule_edge['molecule_name'] = tmp.index
molecule_edge = molecule_edge.merge(edge_frame[shared_cols].drop_duplicates(), on='molecule_name', how='left')
cols = molecule_edge.columns.tolist()
new_cols = cols[-3:] + cols[:-3]
molecule_edge = molecule_edge[new_cols]
molecule_edge.columns = ['molecule_name', 'num_edge', 'edge_dim']+ ['edge_%s'%i for i in range(EDGE_MAX*5)]
molecule_edge.head(2)
coupling_frame = pd.read_csv(DATA_DIR+'parquet/baseline_coupling_frame.csv')
coupling_frame.head(2)
from time import time
def save_cv_data(fold, coupling_frame):
print('fold: %s' %fold)
split_train = 'train_split_by_mol_hash.%s.npy'%fold
split_valid = 'valid_split_by_mol_hash.%s.npy'%fold
id_train_ = np.load(DATA_DIR + '/split/%s'%split_train,allow_pickle=True)
id_valid_ = np.load(DATA_DIR + '/split/%s'%split_valid,allow_pickle=True)
csv = 'test'
df = pd.read_csv(DATA_DIR + '/csv/%s.csv'%csv)
id_test_ = df.molecule_name.unique()
train = coupling_frame[coupling_frame.molecule_name.isin(id_train_)]
validation = coupling_frame[coupling_frame.molecule_name.isin(id_valid_)]
test = coupling_frame[coupling_frame.molecule_name.isin(id_test_)]
# Get GaussRank of coupling values
t0 = time()
grm = GaussRankMap()
df_train = train[['coupling_type', 'scalar_coupling']]
df_valid = validation[['coupling_type', 'scalar_coupling']]
df_train.columns = ['type', 'scalar_coupling_constant']
df_valid.columns = ['type', 'scalar_coupling_constant']
# Reverse type mapping
df_train.type = df_train.type.map(REVERSE_COUPLING_TYPE)
df_valid.type = df_valid.type.map(REVERSE_COUPLING_TYPE)
#fit grm
transformed_training = grm.fit_training(df_train, reset=True)
transformed_validation = grm.convert_df(df_valid, from_coupling=True)
validation['gaussrank_coupling'] = transformed_validation
train['gaussrank_coupling'] = transformed_training
print('Getting gaussrank transformation for train/validation data took %s seconds' %(time()-t0))
print(grm.coupling_order)
test['gaussrank_coupling'] = 0
general_coupling_frame = pd.concat([train, validation, test])
# Build molecule coupling frame for fold
coupling_cols = ['atom_index_0', 'atom_index_1', 'coupling_type', 'scalar_coupling', 'gaussrank_coupling', 'fc', 'sd', 'pso', 'dso', 'id']
shared_cols = ['molecule_name', 'num_coupling', 'coupling_dim']
tmp = general_coupling_frame.groupby('molecule_name').apply(lambda x: x[coupling_cols].values.reshape(-1))
molecule_coupling = pd.DataFrame(tmp.values.tolist())
# pad coupling_max from 135 to 136
pad_cols = 10
d = dict.fromkeys([str(i) for i in range(molecule_coupling.shape[1], molecule_coupling.shape[1]+pad_cols)], 0.0)
molecule_coupling = molecule_coupling.assign(**d).fillna(0.0)
molecule_coupling['molecule_name'] = tmp.index
molecule_coupling = molecule_coupling.merge(general_coupling_frame[shared_cols].drop_duplicates(), on='molecule_name', how='left')
cols = molecule_coupling.columns.tolist()
new_cols = cols[-3:] + cols[:-3]
molecule_coupling = molecule_coupling[new_cols]
molecule_coupling.columns = ['molecule_name', 'num_coupling', 'coupling_dim'] + ['coupling_%s'%i for i in range(COUPLING_MAX*10)]
print(molecule_coupling.shape, molecule_edge.shape, molecule_node.shape)
node_edge_frame = pd.merge(molecule_node, molecule_edge, on='molecule_name', how='left')
general_stack_frame = pd.merge(node_edge_frame, molecule_coupling, on='molecule_name', how='left')
train_frame = general_stack_frame[general_stack_frame.molecule_name.isin(id_train_)]
validation_frame = general_stack_frame[general_stack_frame.molecule_name.isin(id_valid_)]
test_frame = general_stack_frame[general_stack_frame.molecule_name.isin(id_test_)]
validation_frame.to_parquet(DATA_DIR+'/parquet/fold_%s'%fold+'/validation.parquet')
train_frame.to_parquet(DATA_DIR +'/parquet/fold_%s'%fold+ '/train.parquet')
# save mapping
for i, (type_, frame) in enumerate(zip(grm.coupling_order, grm.training_maps)):
frame.to_csv(DATA_DIR +'/parquet/fold_%s'%fold+'/mapping_type_%s_order_%s.csv'%(type_, i), index=False)
return test_frame
test_frame = save_cv_data(1, coupling_frame)
%%time
folds = 4
for fold in range(1, folds):
print(coupling_frame.shape)
test_frame = save_cv_data(fold, coupling_frame)
test_frame.to_parquet(DATA_DIR +'/parquet/test.parquet') | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/build_train_validation_rnn_per_type.ipynb | from build_predictions import *
from GaussRank import GaussRankMap
import pandas as pd
from create_parquet import *
from data import *
import warnings
warnings.filterwarnings("ignore") DATA_DIR='/rapids/notebooks/srabhi/champs-2019/input/'COUPLING_TYPE# node frame
molecule_node = pd.read_parquet(DATA_DIR+'parquet/molecule_node.parquet')
# edge frame
molecule_edge = pd.read_parquet(DATA_DIR+'parquet/molecule_edge.parquet')
from time import time
def save_cv_data(type_, fold):
print('type %s : %s' %(type_, COUPLING_TYPE[type_]))
# coupling frame of the type
coupling_frame = pd.read_csv(DATA_DIR+'parquet/baseline_coupling_frame.csv')
cols = ['molecule_name', 'num_coupling', 'coupling_dim', 'atom_index_0', 'atom_index_1', 'coupling_type', 'scalar_coupling',
'fc', 'sd', 'pso', 'dso', 'id']
coupling_frame = coupling_frame[cols]
coupling_frame = coupling_frame[coupling_frame.coupling_type == type_]
new_num_coupling = dict(coupling_frame.groupby('molecule_name').count()['num_coupling'])
coupling_frame.num_coupling = coupling_frame.molecule_name.map(new_num_coupling)
# shortest path
shortest_path_frame = pd.read_csv('/rapids/notebooks/srabhi/champs-2019/input/shortest_path.csv')
cols = [ 'id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'path_index_0', 'path_index_1',
'path_index_2', 'path_index_3', 'path_btype_0', 'path_btype_1',
'path_btype_2', 'path_a_num_0', 'path_a_num_1', 'path_a_num_2',
'path_a_num_3']
shortest_path_frame = shortest_path_frame[cols]
coupling_frame = pd.merge(coupling_frame, shortest_path_frame, on=['id', 'molecule_name', 'atom_index_0', 'atom_index_1'], how='left')
print(coupling_frame.shape)
max_ = coupling_frame.num_coupling.max()
COUPLING_MAX = max_
print('max coupling: %s' %max_)
print('fold: %s' %fold)
split_train = 'train_split_by_mol_hash.%s.npy'%fold
split_valid = 'valid_split_by_mol_hash.%s.npy'%fold
id_train_ = np.load(DATA_DIR + '/split/%s'%split_train,allow_pickle=True)
id_valid_ = np.load(DATA_DIR + '/split/%s'%split_valid,allow_pickle=True)
csv = 'test'
df = pd.read_csv(DATA_DIR + '/csv/%s.csv'%csv)
id_test_ = df.molecule_name.unique()
train = coupling_frame[coupling_frame.molecule_name.isin(id_train_)]
validation = coupling_frame[coupling_frame.molecule_name.isin(id_valid_)]
test = coupling_frame[coupling_frame.molecule_name.isin(id_test_)]
# Get GaussRank of coupling values
t0 = time()
grm = GaussRankMap()
df_train = train[['coupling_type', 'scalar_coupling']]
df_valid = validation[['coupling_type', 'scalar_coupling']]
df_train.columns = ['type', 'scalar_coupling_constant']
df_valid.columns = ['type', 'scalar_coupling_constant']
# Reverse type mapping
df_train.type = df_train.type.map(REVERSE_COUPLING_TYPE)
df_valid.type = df_valid.type.map(REVERSE_COUPLING_TYPE)
#fit grm
transformed_training = grm.fit_training(df_train, reset=True)
transformed_validation = grm.convert_df(df_valid, from_coupling=True)
validation['gaussrank_coupling'] = transformed_validation
train['gaussrank_coupling'] = transformed_training
print('Getting gaussrank transformation for train/validation data took %s seconds' %(time()-t0))
print(grm.coupling_order)
test['gaussrank_coupling'] = 0
general_coupling_frame = pd.concat([train, validation, test.fillna(0.0)])
# Build molecule coupling frame for fold
coupling_cols = ['atom_index_0', 'atom_index_1', 'coupling_type', 'scalar_coupling', 'gaussrank_coupling',
'fc', 'sd', 'pso', 'dso', 'id',
'path_index_0', 'path_index_1', 'path_index_2','path_index_3',
'path_btype_0', 'path_btype_1', 'path_btype_2',
'path_a_num_0', 'path_a_num_1', 'path_a_num_2', 'path_a_num_3']
shared_cols = ['molecule_name', 'num_coupling', 'coupling_dim']
tmp = general_coupling_frame.groupby('molecule_name').apply(lambda x: x[coupling_cols].values.reshape(-1))
molecule_coupling = pd.DataFrame(tmp.values.tolist()).fillna(0.0)
molecule_coupling['molecule_name'] = tmp.index
molecule_coupling = molecule_coupling.merge(general_coupling_frame[shared_cols].drop_duplicates(), on='molecule_name', how='left')
cols = molecule_coupling.columns.tolist()
new_cols = cols[-3:] + cols[:-3]
molecule_coupling = molecule_coupling[new_cols]
print(molecule_coupling.shape)
molecule_coupling.columns = ['molecule_name', 'num_coupling', 'coupling_dim'] + ['coupling_%s'%i for i in range(COUPLING_MAX*21)]
node_edge_frame = pd.merge(molecule_node, molecule_edge, on='molecule_name', how='left')
general_stack_frame = pd.merge(node_edge_frame, molecule_coupling, on='molecule_name', how='inner')
train_frame = general_stack_frame[general_stack_frame.molecule_name.isin(id_train_)]
validation_frame = general_stack_frame[general_stack_frame.molecule_name.isin(id_valid_)]
test_frame = general_stack_frame[general_stack_frame.molecule_name.isin(id_test_)]
type_str = COUPLING_TYPE[type_]
os.makedirs(DATA_DIR+'rnn_parquet/fold_%s'%fold+'/%s'%type_str, exist_ok=True)
validation_frame.to_parquet(DATA_DIR+'rnn_parquet/fold_%s'%fold+'/%s/validation.parquet'%type_str)
train_frame.to_parquet(DATA_DIR +'rnn_parquet/fold_%s'%fold+ '/%s/train.parquet'%type_str)
# save mapping
for i, (str_type_, frame) in enumerate(zip(grm.coupling_order, grm.training_maps)):
frame.to_csv(DATA_DIR +'rnn_parquet/fold_%s'%fold+'/%s/mapping_%s_order_%s.csv'%(str_type_, str_type_, i), index=False)
return test_frame
COUPLING_MAX_DICT = {'1JHC': 20, '2JHC': 36, '3JHC': 66, '1JHN': 8, '2JHN': 12, '3JHN': 18, '3JHH': 36, '2JHH': 19 }
for type_ in range(8):
for fold in range(4):
test_frame = save_cv_data(type_, fold)
test_frame.to_parquet(DATA_DIR +'/rnn_parquet/test_%s.parquet'%COUPLING_TYPE[type_]) | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/baseline_coupling_frame_from_csv_cudf.ipynb | import cudf as gdpath = '/rapids/notebooks/srabhi/champs-2019/input/csv/'%%time
train = gd.read_csv('%s/train.csv'%path)
test = gd.read_csv('%s/test.csv'%path)
print(train.shape,test.shape)
test['scalar_coupling_constant'] = 0.0
for col in train.columns:
if train[col].dtype!='O':
train[col] = train[col].astype('float32')
if col in test.columns:
test[col] = test[col].astype('float32')
struct = gd.read_csv('%s/structures.csv'%path)
for col in struct.columns:
if struct[col].dtype!='O':
struct[col] = struct[col].astype('float32')
print(struct.shape)
contribs = gd.read_csv('%s/scalar_coupling_contributions.csv'%path)
for col in contribs.columns:
if contribs[col].dtype!='O':
contribs[col] = contribs[col].astype('float32')
print(contribs.shape)
contribs.head().to_pandas()
train.head().to_pandas()
test.head().to_pandas()
all_data = gd.concat([train, test])
coupling_frame = gd.merge(all_data, contribs, on=['molecule_name', 'atom_index_0', 'atom_index_1', 'type'], how='left')
count = coupling_frame.groupby('molecule_name').count().reset_index()[['molecule_name', 'id']]
count.columns = ['molecule_name', 'num_coupling']
coupling_frame = coupling_frame.merge(count, on='molecule_name', how='left')
coupling_frame['coupling_dim'] = 9
coupling_frame.head(10).to_pandas()
coupling_frame.to_csv('/rapids/notebooks/srabhi/champs-2019/input/parquet/baseline_coupling_frame.csv', index=False) | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/build_baseline_dataframes.ipynb | from create_parquet import *
from data import *
import warnings
warnings.filterwarnings("ignore") def get_node_from_graph(molecule_file):
'''
- molecule file: path to %molecule_name.pickle
Returns:
Convert the pickled graph to a padded vector with all the molecule information
'''
molecule_name = molecule_file.split('/')[-1].strip('.pickle')
graph = read_pickle_from_file(molecule_file)
molecule_name = graph.molecule_name
node_feats = np.concatenate(graph.node,-1)
num_node, node_dim = node_feats.shape
node = pd.DataFrame(node_feats)
node.columns = ['symbol', 'acceptor', 'donor', 'aromatic', 'hybridization', 'num_h', 'atomic',]
node['num_nodes'] = num_node
node['node_dim'] = node_dim
node['molecule_name'] = molecule_name
node['atom_index'] = list(range(num_node))
return node
get_node_from_graph('/rapids/notebooks/srabhi/champs-2019/input/structure/graph2/dsgdb9nsd_133885.pickle')
from parallel_process import parallel_process
files = glob.glob('/rapids/notebooks/srabhi/champs-2019/input/structure/graph2/*.pickle')
frames = parallel_process(files, get_node_from_graph)
node_frame = pd.concat(frames)
node_frame.to_csv('/rapids/notebooks/srabhi/champs-2019/input/parquet/baseline_node_frame.csv', index=False)
node_frame.head(2)
def get_coupling_from_graph(molecule_file):
'''
- molecule file: path to %molecule_name.pickle
Returns:
Convert the pickled graph to a padded vector with all the molecule information
'''
molecule_name = molecule_file.split('/')[-1].strip('.pickle')
graph = read_pickle_from_file(molecule_file)
molecule_name = graph.molecule_name
coupling_feats = np.concatenate([graph.coupling.index, graph.coupling.type.reshape(-1, 1),
graph.coupling.value.reshape(-1,1), graph.coupling.contribution,
graph.coupling.id.reshape(-1,1)], -1)
num_coupling, coupling_dim = coupling_feats.shape
#change to cudf
coupling = pd.DataFrame(coupling_feats)
coupling.columns = ['atom_index_0', 'atom_index_1', 'coupling_type', 'scalar_coupling', 'fc', 'sd', 'pso', 'dso', 'id']
coupling['num_coupling'] = num_coupling
coupling['coupling_dim'] = coupling_dim
coupling['molecule_name'] = molecule_name
return coupling
get_coupling_from_graph('/rapids/notebooks/srabhi/champs-2019/input/structure/graph2/dsgdb9nsd_103915.pickle').head(2)
from parallel_process import parallel_process
files = glob.glob('/rapids/notebooks/srabhi/champs-2019/input/structure/graph2/*.pickle')
frames = parallel_process(files, get_coupling_from_graph)
coupling_frame = pd.concat(frames)
coupling_frame.head(3)
coupling_frame.to_csv('/rapids/notebooks/srabhi/champs-2019/input/parquet/baseline_coupling_frame.csv', index=False)
from data import *
def get_edge_from_graph(molecule_file):
'''
- molecule file: path to %molecule_name.pickle
Returns:
Convert the pickled graph to a padded vector with all the molecule information
'''
molecule_name = molecule_file.split('/')[-1].strip('.pickle')
graph = read_pickle_from_file(molecule_file)
molecule_name = graph.molecule_name
edge_feats = np.concatenate(graph.edge,-1)
edge_feats = np.concatenate([graph.edge_index, edge_feats], -1)
num_edge, edge_dim = edge_feats.shape
infor = [molecule_name, num_edge, edge_dim]
edge = pd.DataFrame(edge_feats)
edge.columns = ['atom_index_0', 'atom_index_1', 'edge_type', 'distance', 'angle']
edge['molecule_name'] = molecule_name
edge['num_edge'] = num_edge
edge['edge_dim'] = edge_dim
return edge
files = glob.glob('/rapids/notebooks/srabhi/champs-2019/input/structure/graph2/*.pickle')
#t = get_edge_from_graph(molecule_file+molecule_name+'.pickle')
from parallel_process import parallel_process
frames = parallel_process(files, get_edge_from_graph)
edge_frame = pd.concat(frames)
edge_frame.to_csv('/rapids/notebooks/srabhi/champs-2019/input/parquet/baseline_edge_frame.csv', index=False) | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/lib/include.py | import os
from datetime import datetime
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__).replace('/lib',''))
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
#numerical libs
import math
import numpy as np
import random
import PIL
#import cv2
import matplotlib
#matplotlib.use('TkAgg')
#matplotlib.use('WXAgg')
#matplotlib.use('Qt4Agg')
#matplotlib.use('Qt5Agg') #Qt4Agg
print('matplotlib.get_backend : ', matplotlib.get_backend())
#print(matplotlib.__version__)
# torch libs
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import *
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
from torch.nn.utils.rnn import *
# std libs
import collections
import copy
import numbers
import inspect
import shutil
from timeit import default_timer as timer
import itertools
from collections import OrderedDict
from multiprocessing import Pool
import multiprocessing as mp
#from pprintpp import pprint, pformat
import json
import zipfile
import csv
import pandas as pd
import pickle
import glob
import sys
from distutils.dir_util import copy_tree
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# constant #
PI = np.pi
INF = np.inf
EPS = 1e-12
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/lib | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/lib/net/rate.py | # learning rate schduler
from lib.include import *
# http://elgoacademy.org/anatomy-matplotlib-part-1/
def plot_rates(fig, lrs, title=''):
N = len(lrs)
epoches = np.arange(0,N)
#get limits
max_lr = np.max(lrs)
xmin=0
xmax=N
dx=2
ymin=0
ymax=max_lr*1.2
dy=(ymax-ymin)/10
dy=10**math.ceil(math.log10(dy))
ax = fig.add_subplot(111)
#ax = fig.gca()
ax.set_axisbelow(True)
ax.minorticks_on()
ax.set_xticks(np.arange(xmin,xmax+0.0001, dx))
ax.set_yticks(np.arange(ymin,ymax+0.0001, dy))
ax.set_xlim(xmin,xmax+0.0001)
ax.set_ylim(ymin,ymax+0.0001)
ax.grid(b=True, which='minor', color='black', alpha=0.1, linestyle='dashed')
ax.grid(b=True, which='major', color='black', alpha=0.4, linestyle='dashed')
ax.set_xlabel('iter')
ax.set_ylabel('learning rate')
ax.set_title(title)
ax.plot(epoches, lrs)
## simple stepping rates
class StepScheduler():
def __init__(self, pairs):
super(StepScheduler, self).__init__()
N=len(pairs)
rates=[]
steps=[]
for n in range(N):
steps.append(pairs[n][0])
rates.append(pairs[n][1])
self.rates = rates
self.steps = steps
def __call__(self, epoch):
N = len(self.steps)
lr = -1
for n in range(N):
if epoch >= self.steps[n]:
lr = self.rates[n]
return lr
def __str__(self):
string = 'Step Learning Rates\n' \
+ 'rates=' + str(['%7.4f' % i for i in self.rates]) + '\n' \
+ 'steps=' + str(['%7.0f' % i for i in self.steps]) + ''
return string
## https://github.com/pytorch/tutorials/blob/master/beginner_source/transfer_learning_tutorial.py
class DecayScheduler():
def __init__(self, base_lr, decay, step):
super(DecayScheduler, self).__init__()
self.step = step
self.decay = decay
self.base_lr = base_lr
def get_rate(self, epoch):
lr = self.base_lr * (self.decay**(epoch // self.step))
return lr
def __str__(self):
string = '(Exp) Decay Learning Rates\n' \
+ 'base_lr=%0.3f, decay=%0.3f, step=%0.3f'%(self.base_lr, self.decay, self.step)
return string
# 'Cyclical Learning Rates for Training Neural Networks'- Leslie N. Smith, arxiv 2017
# https://arxiv.org/abs/1506.01186
# https://github.com/bckenstler/CLR
class CyclicScheduler1():
def __init__(self, min_lr=0.001, max_lr=0.01, period=10 ):
super(CyclicScheduler1, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
def __call__(self, time):
#sawtooth
#r = (1-(time%self.period)/self.period)
#cosine
time= time%self.period
r = (np.cos(time/self.period *PI)+1)/2
lr = self.min_lr + r*(self.max_lr-self.min_lr)
return lr
def __str__(self):
string = 'CyclicScheduler\n' \
+ 'min_lr=%0.3f, max_lr=%0.3f, period=%8.1f'%(self.min_lr, self.max_lr, self.period)
return string
class CyclicScheduler2():
def __init__(self, min_lr=0.001, max_lr=0.01, period=10, max_decay=0.99, warm_start=0 ):
super(CyclicScheduler2, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
self.max_decay = max_decay
self.warm_start = warm_start
self.cycle = -1
def __call__(self, time):
if time<self.warm_start: return self.max_lr
#cosine
self.cycle = (time-self.warm_start)//self.period
time = (time-self.warm_start)%self.period
period = self.period
min_lr = self.min_lr
max_lr = self.max_lr *(self.max_decay**self.cycle)
r = (np.cos(time/period *PI)+1)/2
lr = min_lr + r*(max_lr-min_lr)
return lr
def __str__(self):
string = 'CyclicScheduler\n' \
+ 'min_lr=%0.4f, max_lr=%0.4f, period=%8.1f'%(self.min_lr, self.max_lr, self.period)
return string
#tanh curve
class CyclicScheduler3():
def __init__(self, min_lr=0.001, max_lr=0.01, period=10, max_decay=0.99, warm_start=0 ):
super(CyclicScheduler3, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
self.max_decay = max_decay
self.warm_start = warm_start
self.cycle = -1
def __call__(self, time):
if time<self.warm_start: return self.max_lr
#cosine
self.cycle = (time-self.warm_start)//self.period
time = (time-self.warm_start)%self.period
period = self.period
min_lr = self.min_lr
max_lr = self.max_lr *(self.max_decay**self.cycle)
r = (np.tanh(-time/period *16 +8)+1)*0.5
lr = min_lr + r*(max_lr-min_lr)
return lr
def __str__(self):
string = 'CyclicScheduler\n' \
+ 'min_lr=%0.3f, max_lr=%0.3f, period=%8.1f'%(self.min_lr, self.max_lr, self.period)
return string
#
# class CyclicScheduler():
#
# def __init__(self, pairs, period=10, max_decay=1, warm_start=0 ):
# super(CyclicScheduler, self).__init__()
#
# self.lrs=[]
# self.steps=[]
# for p in pairs:
# self.steps.append(p[0])
# self.lrs.append(p[1])
#
#
# self.period = period
# self.warm_start = warm_start
# self.max_decay = max_decay
# self.cycle = -1
#
# def __call__(self, time):
# if time<self.warm_start: return self.lrs[0]
#
# self.cycle = (time-self.warm_start)//self.period
# time = (time-self.warm_start)%self.period
#
# rates = self.lrs.copy()
# steps = self.steps
# rates[0] = rates[0] *(self.max_decay**self.cycle)
# lr = -1
# for rate,step in zip(rates,steps):
# if time >= step:
# lr = rate
#
# return lr
#
#
#
# def __str__(self):
# string = 'CyclicScheduler\n' \
# + 'lrs =' + str(['%7.4f' % i for i in self.lrs]) + '\n' \
# + 'steps=' + str(['%7.0f' % i for i in self.steps]) + '\n' \
# + 'period=%8.1f'%(self.period)
# return string
class NullScheduler():
def __init__(self, lr=0.01 ):
super(NullScheduler, self).__init__()
self.lr = lr
self.cycle = 0
def __call__(self, time):
return self.lr
def __str__(self):
string = 'NullScheduler\n' \
+ 'lr=%0.5f '%(self.lr)
return string
# net ------------------------------------
# https://github.com/pytorch/examples/blob/master/imagenet/main.py ###############
def adjust_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_learning_rate(optimizer):
lr=[]
for param_group in optimizer.param_groups:
lr +=[ param_group['lr'] ]
assert(len(lr)==1) #we support only one param_group
lr = lr[0]
return lr
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
num_iters=125
scheduler = StepScheduler([ (0,0.1), (10,0.01), (25,0.005), (35,0.001), (40,0.0001), (43,-1)])
#scheduler = DecayScheduler(base_lr=0.1, decay=0.32, step=10)
#scheduler = CyclicScheduler(min_lr=0.0001, max_lr=0.01, period=30., warm_start=5) ##exp_range ##triangular2
#scheduler = CyclicScheduler([ (0,0.1), (25,0.01), (45,0.005)], period=50., warm_start=5) ##exp_range ##triangular2
lrs = np.zeros((num_iters),np.float32)
for iter in range(num_iters):
lr = scheduler(iter)
lrs[iter] = lr
if lr<0:
num_iters = iter
break
#print ('iter=%02d, lr=%f %d'%(iter,lr, scheduler.cycle))
#plot
fig = plt.figure()
plot_rates(fig, lrs, title=str(scheduler))
plt.show()
# https://github.com/Jiaming-Liu/pytorch-lr-scheduler/blob/master/lr_scheduler.py
# PVANET plateau lr policy
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/lib | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/lib/utility/draw.py | import os
#qt bug ???
os.environ['QT_XKB_CONFIG_ROOT']='/usr/share/X11/xkb/'
from lib.include import *
import matplotlib.cm
import cv2
# draw -----------------------------------
def image_show(name, image, resize=1):
H,W = image.shape[0:2]
cv2.namedWindow(name, cv2.WINDOW_GUI_NORMAL) #WINDOW_NORMAL
#cv2.namedWindow(name, cv2.WINDOW_GUI_EXPANDED) #WINDOW_GUI_EXPANDED
cv2.imshow(name, image.astype(np.uint8))
cv2.resizeWindow(name, round(resize*W), round(resize*H))
def image_show_norm(name, image, max=None, min=None, resize=1):
if max is None: max=image.max()
if min is None: min=image.min()
H,W = image.shape[0:2]
cv2.namedWindow(name, cv2.WINDOW_GUI_NORMAL) #WINDOW_NORMAL
cv2.imshow(name, ((image-min)/(max-min)*255).astype(np.uint8))
cv2.resizeWindow(name, round(resize*W), round(resize*H))
def draw_shadow_text(img, text, pt, fontScale, color, thickness, color1=None, thickness1=None):
if color1 is None: color1=(0,0,0)
if thickness1 is None: thickness1 = thickness+2
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, pt, font, fontScale, color1, thickness1, cv2.LINE_AA)
cv2.putText(img, text, pt, font, fontScale, color, thickness, cv2.LINE_AA)
def to_color_image(image, max=None):
if max is None: max=image.max()
image = (image/max*255).astype(np.uint8)
image = cv2.cvtColor(image,cv2.COLOR_GRAY2BGR)
return image
##http://stackoverflow.com/questions/26690932/opencv-rectangle-with-dotted-or-dashed-lines
def draw_dotted_line(image, pt1, pt2, color, thickness=1, gap=20):
dist =((pt1[0]-pt2[0])**2+(pt1[1]-pt2[1])**2)**.5
pts= []
for i in np.arange(0,dist,gap):
r=i/dist
x=int((pt1[0]*(1-r)+pt2[0]*r)+.5)
y=int((pt1[1]*(1-r)+pt2[1]*r)+.5)
p = (x,y)
pts.append(p)
if gap==1:
for p in pts:
cv2.circle(image,p,thickness,color,-1,cv2.LINE_AA)
else:
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
for p, q in pairwise(pts):
cv2.line(image,p, q, color,thickness,cv2.LINE_AA)
def draw_dotted_poly(image, pts, color, thickness=1, gap=20):
s=pts[0]
e=pts[0]
pts.append(pts.pop(0))
for p in pts:
s=e
e=p
draw_dotted_line(image,s,e,color,thickness,gap)
def draw_dotted_rect(image, pt1, pt2, color, thickness=1, gap=3):
pts = [pt1,(pt2[0],pt1[1]),pt2,(pt1[0],pt2[1])]
draw_dotted_poly(image, pts, color, thickness, gap)
def draw_screen_rect(image, pt1, pt2, color, alpha=0.5):
x1, y1 = pt1
x2, y2 = pt2
image[y1:y2,x1:x2,:] = (1-alpha)*image[y1:y2,x1:x2,:] + (alpha)*np.array(color, np.uint8)
# def draw_mask(image, mask, color=(255,255,255), α=1, β=0.25, λ=0., threshold=32 ):
# # image * α + mask * β + λ
#
# if threshold is None:
# mask = mask/255
# else:
# mask = clean_mask(mask,threshold,1)
#
# mask = np.dstack((color[0]*mask,color[1]*mask,color[2]*mask)).astype(np.uint8)
# image[...] = cv2.addWeighted(image, α, mask, β, λ)
#
# def draw_contour(image, mask, color=(0,255,0), thickness=1, threshold=127):
# ret, thresh = cv2.threshold(mask,threshold,255,0)
# ret = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# hierarchy = ret[0]
# contours = ret[1]
# #image[...]=image
# cv2.drawContours(image, contours, -1, color, thickness, cv2.LINE_AA)
# ## drawContours(image, contours, contourIdx, color, thickness=None, lineType=None, hierarchy=None, maxLevel=None, offset=None): # real signature unknown; restored from __doc__
#
#
def to_color(s, color=None):
if type(color) in [str] or color is None:
#https://matplotlib.org/xkcd/examples/color/colormaps_reference.html
if color is None: color='cool'
color = matplotlib.cm.get_cmap(color)(s)
b = int(255*color[2])
g = int(255*color[1])
r = int(255*color[0])
elif type(color) in [list,tuple]:
b = int(s*color[0])
g = int(s*color[1])
r = int(s*color[2])
return b,g,r
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
image = np.zeros((50,50,3), np.uint8)
cv2.rectangle(image, (0,0),(49,49), (0,0,255),1) #inclusive
image[8,8]=[255,255,255]
image_show('image',image,10)
cv2.waitKey(0)
print('\nsucess!') | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/lib | rapidsai_public_repos/deeplearning/champs-scalar-coupling/build_data/lib/utility/file.py | from lib.include import *
import builtins
import re
class Struct(object):
def __init__(self, is_copy=False, **kwargs):
self.add(is_copy, **kwargs)
def add(self, is_copy=False, **kwargs):
#self.__dict__.update(kwargs)
if is_copy == False:
for key, value in kwargs.items():
setattr(self, key, value)
else:
for key, value in kwargs.items():
try:
setattr(self, key, copy.deepcopy(value))
#setattr(self, key, value.copy())
except Exception:
setattr(self, key, value)
def __str__(self):
return str(self.__dict__.keys())
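# Example (hypothetical values): s = Struct(name='dsgdb9nsd_000001', num_atom=5) simply
# exposes the keyword arguments as attributes, so s.name == 'dsgdb9nsd_000001' and s.num_atom == 5.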
# log ------------------------------------
def remove_comments(lines, token='#'):
""" Generator. Strips comments and whitespace from input lines.
"""
l = []
for line in lines:
s = line.split(token, 1)[0].strip()
if s != '':
l.append(s)
return l
def open(file, mode=None, encoding=None):
if mode == None: mode = 'r'
if '/' in file:
if 'w' in mode or 'a' in mode:
dir = os.path.dirname(file)
if not os.path.isdir(dir): os.makedirs(dir)
f = builtins.open(file, mode=mode, encoding=encoding)
return f
def remove(file):
if os.path.exists(file): os.remove(file)
def empty(dir):
if os.path.isdir(dir):
shutil.rmtree(dir, ignore_errors=True)
else:
os.makedirs(dir)
# http://stackoverflow.com/questions/34950201/pycharm-print-end-r-statement-not-working
class Logger(object):
def __init__(self):
self.terminal = sys.stdout #stdout
self.file = None
def open(self, file, mode=None):
if mode is None: mode ='w'
self.file = open(file, mode)
def write(self, message, is_terminal=1, is_file=1 ):
if '\r' in message: is_file=0
if is_terminal == 1:
self.terminal.write(message)
self.terminal.flush()
#time.sleep(1)
if is_file == 1:
self.file.write(message)
self.file.flush()
def flush(self):
# this flush method is needed for python 3 compatibility.
# this handles the flush command by doing nothing.
# you might want to specify some extra behavior here.
pass
# io ------------------------------------
def write_list_to_file(list_file, strings):
with open(list_file, 'w') as f:
for s in strings:
f.write('%s\n'%str(s))
pass
def read_list_from_file(list_file, comment='#'):
with open(list_file) as f:
lines = f.readlines()
strings=[]
for line in lines:
if comment is not None:
s = line.split(comment, 1)[0].strip()
else:
s = line.strip()
if s != '':
strings.append(s)
return strings
def read_pickle_from_file(pickle_file):
with open(pickle_file,'rb') as f:
x = pickle.load(f)
return x
def write_pickle_to_file(pickle_file, x):
with open(pickle_file, 'wb') as f:
pickle.dump(x, f, pickle.HIGHEST_PROTOCOL)
# backup ------------------------------------
#https://stackoverflow.com/questions/1855095/how-to-create-a-zip-archive-of-a-directory
def backup_project_as_zip(project_dir, zip_file):
assert(os.path.isdir(project_dir))
assert(os.path.isdir(os.path.dirname(zip_file)))
shutil.make_archive(zip_file.replace('.zip',''), 'zip', project_dir)
pass
# etc ------------------------------------
def time_to_str(t, mode='min'):
if mode=='min':
t = int(t)/60
hr = t//60
min = t%60
return '%2d hr %02d min'%(hr,min)
elif mode=='sec':
t = int(t)
min = t//60
sec = t%60
return '%2d min %02d sec'%(min,sec)
else:
raise NotImplementedError
def np_float32_to_uint8(x, scale=255):
return (x*scale).astype(np.uint8)
def np_uint8_to_float32(x, scale=255):
return (x/scale).astype(np.float32)
def int_tuple(x):
return tuple( [int(round(xx)) for xx in x] )
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/parallel_process.py | from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor, as_completed
"""
Credit to http://danshiebler.com
"""
def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=3):
"""
A parallel version of the map function with a progress bar.
Args:
array (array-like): An array to iterate over.
function (function): A python function to apply to the elements of array
n_jobs (int, default=16): The number of cores to use
use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of
keyword arguments to function
front_num (int, default=3): The number of iterations to run serially before kicking off the parallel job.
Useful for catching bugs
Returns:
[function(array[0]), function(array[1]), ...]
"""
#We run the first few iterations serially to catch bugs
if front_num > 0:
front = [function(**a) if use_kwargs else function(a) for a in array[:front_num]]
#If we set n_jobs to 1, just run a list comprehension. This is useful for benchmarking and debugging.
if n_jobs==1:
return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
#Assemble the workers
with ProcessPoolExecutor(max_workers=n_jobs) as pool:
#Pass the elements of array into function
if use_kwargs:
futures = [pool.submit(function, **a) for a in array[front_num:]]
else:
futures = [pool.submit(function, a) for a in array[front_num:]]
kwargs = {
'total': len(futures),
'unit': 'it',
'unit_scale': True,
'leave': True
}
#Print out the progress as tasks complete
for f in tqdm(as_completed(futures), **kwargs):
pass
out = []
#Get the results from the futures.
for i, future in tqdm(enumerate(futures)):
try:
out.append(future.result())
except Exception as e:
out.append(e)
return front + out | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/common.py | from .lib.include import *
from .lib.utility.draw import *
from .lib.utility.file import *
from .lib.net.rate import *
#---------------------------------------------------------------------------------
COMMON_STRING ='@%s: \n' % os.path.basename(__file__)
if 1:
SEED = int(time.time()) #35202 #35202 #123 #
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
COMMON_STRING += '\tset random seed\n'
COMMON_STRING += '\t\tSEED = %d\n'%SEED
torch.backends.cudnn.benchmark = True ##uses the inbuilt cudnn auto-tuner to find the fastest convolution algorithms. -
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
COMMON_STRING += '\tset cuda environment\n'
COMMON_STRING += '\t\ttorch.__version__ = %s\n'%torch.__version__
COMMON_STRING += '\t\ttorch.version.cuda = %s\n'%torch.version.cuda
COMMON_STRING += '\t\ttorch.backends.cudnn.version() = %s\n'%torch.backends.cudnn.version()
try:
COMMON_STRING += '\t\tos[\'CUDA_VISIBLE_DEVICES\'] = %s\n'%os.environ['CUDA_VISIBLE_DEVICES']
NUM_CUDA_DEVICES = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
except Exception:
COMMON_STRING += '\t\tos[\'CUDA_VISIBLE_DEVICES\'] = None\n'
NUM_CUDA_DEVICES = 1
COMMON_STRING += '\t\ttorch.cuda.device_count() = %d\n'%torch.cuda.device_count()
#print ('\t\ttorch.cuda.current_device() =', torch.cuda.current_device())
COMMON_STRING += '\n'
#---------------------------------------------------------------------------------
## useful : http://forums.fast.ai/t/model-visualization/12365/2
if __name__ == '__main__':
print (COMMON_STRING) | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/train_loss.py | import numpy as np
import pandas as pd
import torch
from torch import nn
import torch.nn.functional as F
#############################################################################################################
# #
# Loss functions #
# #
#############################################################################################################
# lmae for single model: 1-type prediction
def lmae_criterion(predict, coupling_value, coupling_rank, coupling_contribution, coupling_type,):
'''
lmae between regression predictions and true scalar coupling constant
'''
coupling_preds, contribution_preds, type_preds = predict
predict = coupling_preds.view(-1)
truth = coupling_value.view(-1)
assert(predict.shape==truth.shape)
loss = torch.abs(predict-truth)
loss = loss.mean()
loss = torch.log(loss+1e-8)
return loss
def lmae(coupling_preds, coupling_value):
predict = coupling_preds.view(-1)
truth = coupling_value.view(-1)
assert(predict.shape==truth.shape)
loss = torch.abs(predict-truth)
loss = loss.mean()
loss = torch.log(loss+1e-8)
return loss
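# The competition score is the mean over the 8 coupling types of log(MAE) computed per type;
# lmae() above is the single-group version, log(mean(|predict - truth|) + 1e-8). For example,
# predictions [1.0, 2.0] against targets [1.5, 2.5] give log(0.5 + 1e-8) ~ -0.693.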
# lmae for multi-type model
def train_criterion(predict,
coupling_value, coupling_rank, coupling_contribution, coupling_type ,
criterion='lmae',
num_output= 1,
gaussrank=True,
pred_type= False):
'''
The loss to be used for training the model w.r.t to flags: pred_type, num_output
TODO : Include per-type loss training
'''
coupling_preds, contribution_preds, type_preds = predict
if not gaussrank:
coupling_rank = coupling_value
# fix the regression loss to use : mse or lmae
if criterion == 'mse':
l = nn.MSELoss()
elif criterion == 'lmae':
l = lmae_criterion
elif criterion == 'mlmae2ce':
cross_entropy_loss = torch.nn.CrossEntropyLoss()(type_preds, coupling_type)
abs_diff = torch.abs(coupling_preds - coupling_rank.view(-1,1).expand(coupling_preds.size()))
if criterion == 'mse':
abs_diff = abs_diff**2
proba_types = F.softmax(type_preds)
weighted_diff = torch.mul(abs_diff, proba_types).sum(dim=1)
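# scatter_add_ below accumulates the probability-weighted absolute errors per coupling type;
# dividing by the per-type counts gives a per-type mean, and the log-then-mean mirrors the
# per-type log-MAE structure of the competition metric.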
unique_labels, labels_count = coupling_type.unique(dim=0, return_counts=True)
res = torch.zeros(unique_labels.max()+1, dtype=torch.float, device='cuda')
res = res.scatter_add_(0, coupling_type, weighted_diff)
res = res[unique_labels]
res = res.div(labels_count.float())
res = res.log().mean()
return res + 2 * cross_entropy_loss
elif criterion == 'mlmaeo2ce':
cross_entropy_loss = torch.nn.CrossEntropyLoss()(type_preds, coupling_type)
abs_diff = torch.abs(coupling_preds - coupling_rank.view(-1,1).expand(coupling_preds.size()))
proba_types = F.softmax(type_preds)
weighted_diff = torch.mul(abs_diff, proba_types).sum(dim=1)
unique_labels, labels_count = coupling_type.unique(dim=0, return_counts=True)
res = torch.zeros(unique_labels.max()+1, dtype=torch.float, device='cuda')
res = res.scatter_add_(0, coupling_type, weighted_diff)
res = res[unique_labels]
res = res.div(labels_count.float())
res = res.log().mean()
if cross_entropy_loss >= 0.05:
return res + 2 * cross_entropy_loss
else:
return res
elif criterion == 'mlmaeo2ceh':
cross_entropy_loss = torch.nn.CrossEntropyLoss()(type_preds, coupling_type)
abs_diff = torch.abs(coupling_preds - coupling_rank.view(-1,1).expand(coupling_preds.size()))
proba_types = F.softmax(type_preds)
weighted_diff = torch.sum((torch.index_select(abs_diff,1,torch.argmax(type_preds, dim=1))*torch.eye(len(coupling_type), device='cuda')), dim=1)
unique_labels, labels_count = coupling_type.unique(dim=0, return_counts=True)
res = torch.zeros(unique_labels.max()+1, dtype=torch.float, device='cuda')
res = res.scatter_add_(0, coupling_type, weighted_diff)
res = res[unique_labels]
res = res.div(labels_count.float())
res = res.log().mean()
if cross_entropy_loss >= 0.05:
return res + 2 * cross_entropy_loss
else:
return res
elif criterion == 'mlmaeo2ceha' or criterion == 'wmlmaeo2ceha':
cross_entropy_loss = torch.nn.CrossEntropyLoss()(type_preds, coupling_type)
abs_diff = torch.abs(coupling_preds - coupling_rank.view(-1,1).expand(coupling_preds.size()))
proba_types = F.softmax(type_preds)
weighted_diff = torch.sum((torch.index_select(abs_diff,1,coupling_type)*torch.eye(len(coupling_type), device='cuda')), dim=1)
unique_labels, labels_count = coupling_type.unique(dim=0, return_counts=True)
res = torch.zeros(unique_labels.max()+1, dtype=torch.float, device='cuda')
res = res.scatter_add_(0, coupling_type, weighted_diff)
if criterion == 'wmlmaeo2ceha':
res = res * torch.tensor([10.,.1,.1,.1,.1,.1,.1,.1], dtype=torch.float, device='cuda')
res = res[unique_labels]
res = res.div(labels_count.float())
if criterion == 'wmlmaeo2ceha':
res = res*res
res = res.mean()
else:
res = res.log().mean()
if cross_entropy_loss >= 0.05:
return res + 2 * cross_entropy_loss
else:
return res
elif criterion == 'lmaeo2ceha':
cross_entropy_loss = torch.nn.CrossEntropyLoss()(type_preds, coupling_type)
abs_diff = torch.abs(coupling_preds - coupling_rank.view(-1,1).expand(coupling_preds.size()))
proba_types = F.softmax(type_preds)
weighted_diff = torch.sum((torch.index_select(abs_diff,1,coupling_type)*torch.eye(len(coupling_type), device='cuda')), dim=1)
res = torch.log(weighted_diff.mean())
if cross_entropy_loss >= 0.05:
return res + 2 * cross_entropy_loss
else:
return res
elif criterion == 'lmae_embed_type':
return lmae(coupling_preds, coupling_rank)
else:
raise Exception(f"""{criterion} is not handled""")
if pred_type:
cross_entropy_loss = torch.nn.CrossEntropyLoss()(type_preds, coupling_type)
abs_diff = torch.abs(coupling_preds - coupling_rank.view(-1,1).expand(coupling_preds.size()))
if criterion == 'mse':
abs_diff = abs_diff**2
proba_types = F.softmax(type_preds)
weighted_diff = torch.mul(abs_diff, proba_types).sum(dim=1)
weighted_loss = torch.log(weighted_diff.mean())
weighted_loss = weighted_loss + 2 * cross_entropy_loss
return weighted_loss
elif num_output == 5:
loss_coupling = l(coupling_preds, coupling_rank)
loss_fc = l(contribution_preds[:, 0], coupling_contribution[:, 0])
loss_sd = l(contribution_preds[:, 1], coupling_contribution[:, 1])
loss_pso = l(contribution_preds[:, 2], coupling_contribution[:, 2])
loss_dso = l(contribution_preds[:, 3], coupling_contribution[:, 3])
return loss_coupling + (0.1 * (loss_fc + loss_sd + loss_pso + loss_dso) / 4)
elif num_output ==1 :
return l(coupling_preds, coupling_rank)
else:
raise Exception(f"""{num_output} is not handled""") | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/data.py | #
#
#
# This module aims to create molecule graphs from Kaggle data and rdkit
#
# It also gives the possibility to create cv folds as .npy files with molecule names
#
#
#
#####################################################################################
from atom_features import *
from collections import defaultdict
import networkx as nx
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import ChemicalFeatures
from rdkit import RDConfig
import rdkit.Chem.Draw
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
DrawingOptions.bondLineWidth=1.8
from rdkit.Chem.rdmolops import SanitizeFlags
import os
from functools import partial
import argparse
import pandas as pd
import cudf as gd
import numpy as np
import scipy
from sklearn import preprocessing
# __all__ = ['make_graph', 'do_one', 'run_convert_to_graph', 'run_make_split' ]
## Helpers for feature extraction #####################################################
COUPLING_TYPE_STATS=[
#type #mean, std, min, max
'1JHC', 94.9761528641869, 18.27722399839607, 66.6008, 204.8800,
'2JHC', -0.2706244378832, 4.52360876732858, -36.2186, 42.8192,
'3JHC', 3.6884695895355, 3.07090647005439, -18.5821, 76.0437,
'1JHN', 47.4798844844683, 10.92204561670947, 24.3222, 80.4187,
'2JHN', 3.1247536134185, 3.67345877025737, -2.6209, 17.7436,
'3JHN', 0.9907298624944, 1.31538940138001, -3.1724, 10.9712,
'2JHH', -10.2866051639817, 3.97960190019757, -35.1761, 11.8542,
'3JHH', 4.7710233597359, 3.70498129755812, -3.0205, 17.4841,
]
NUM_COUPLING_TYPE = len(COUPLING_TYPE_STATS)//5
COUPLING_TYPE_MEAN = [ COUPLING_TYPE_STATS[i*5+1] for i in range(NUM_COUPLING_TYPE)]
COUPLING_TYPE_STD = [ COUPLING_TYPE_STATS[i*5+2] for i in range(NUM_COUPLING_TYPE)]
COUPLING_TYPE = [ COUPLING_TYPE_STATS[i*5 ] for i in range(NUM_COUPLING_TYPE)]
#--- Set of Categorical modalities
SYMBOL = ['H', 'C', 'N', 'O', 'F']
BOND_TYPE = [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC,
]
HYBRIDIZATION=[
#Chem.rdchem.HybridizationType.S,
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
#Chem.rdchem.HybridizationType.SP3D,
#Chem.rdchem.HybridizationType.SP3D2,
]
def one_hot_encoding(x, set):
"""
One-Hot Encode categorical variables
"""
one_hot = [int(x == s) for s in set]
if 0:
if sum(one_hot)==0: print('one_hot_encoding() return NULL!', x, set)
return one_hot
def label_encoding(x, set):
"""
Encode categorical variables to int Ids
"""
try:
return set.index(x)+1
except:
return 0
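# Worked example with the sets defined above: one_hot_encoding('C', SYMBOL) returns
# [0, 1, 0, 0, 0] and label_encoding('C', SYMBOL) returns 2 (index + 1, with 0 reserved
# for values missing from the set).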
''' Graph Structure
node_feature :
category
(symbol,SYMBOL) #5
(acceptor,) #1
(donor, ) #1
(aromatic,) #1
one_hot_encoding(hybridization,HYBRIDIZATION) #3
real
(num_h, ) #1
(atomic, ) #1
edge_feature :
category
(bond_type,BOND_TYPE) #4
real
np.digitize(distance,DISTANCE) #1
angle #1
coupling: Structure
id:
contributions:
index:
type:
value:
'''
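# A graph saved with this structure can be inspected by loading the pickle and indexing the
# fields above, e.g. (hypothetical path): g = read_pickle_from_file('.../dsgdb9nsd_000001.pickle');
# g.node is the list of per-atom feature arrays, g.edge_index the (num_edge, 2) atom-pair indices,
# and g.coupling.value the scalar coupling constants aligned with g.coupling.index.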
#############################################################################################################
# #
# Molecule graph representation #
# #
#############################################################################################################
def make_graph(molecule_name, gb_structure, gb_scalar_coupling,
categorical_encoding='one_hot', normalize_coupling=False, rank=False) :
"""
make_graph --> returns graph as 'Struct' object (see /lib/utility/file.py)
Args:
- molecule_name : (str)
- gb_structure (DataFrame GroupBy): groupby structure: data groupped by molecule name
- gb_scalar_coupling (DataFrame GroupBy): The coupling contributions data groupped by molecule name
- categorical_encoding (str): How represent categorical variables : label vs one-hot enconding
- rank: Transform values into norma distribution
"""
#---- Coupling information
# ['id', 'molecule_name', 'atom_index_0', 'atom_index_1', 'type', 'scalar_coupling_constant', 'fc', 'sd', 'pso', 'dso'],
df = gb_scalar_coupling.get_group(molecule_name)
coupling_index = np.array([ COUPLING_TYPE.index(t) for t in df.type.values ], np.int32)
scalar_coupling_constant = df.scalar_coupling_constant.values
if normalize_coupling:
coupling_mean = np.array([COUPLING_TYPE_MEAN[x] for x in coupling_index], np.float32)
coupling_std = np.array([COUPLING_TYPE_STD[x] for x in coupling_index], np.float32)
scalar_coupling_constant = (scalar_coupling_constant - coupling_mean) / coupling_std
if rank:
scalar_tranform = df.transform.values
coupling = Struct(
id = df.id.values,
contribution = df[['fc', 'sd', 'pso', 'dso']].values,
index = df[['atom_index_0', 'atom_index_1']].values,
type = coupling_index,
value = scalar_coupling_constant,
)
#---- Molecule structure information
df = gb_structure.get_group(molecule_name)
df = df.sort_values(['atom_index'], ascending=True)
# ['molecule_name', 'atom_index', 'atom', 'x', 'y', 'z']
a = df.atom.values.tolist()
xyz = df[['x','y','z']].values
mol = mol_from_axyz(a, xyz)
#---
assert( #check
a == [ mol.GetAtomWithIdx(i).GetSymbol() for i in range(mol.GetNumAtoms())]
)
#--- Atoms information
factory = ChemicalFeatures.BuildFeatureFactory(os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef'))
feature = factory.GetFeaturesForMol(mol)
if categorical_encoding =='one_hot':
## ** node features **
num_atom = mol.GetNumAtoms()
symbol = np.zeros((num_atom,len(SYMBOL)),np.uint8) #category
acceptor = np.zeros((num_atom,1),np.uint8) #bool
donor = np.zeros((num_atom,1),np.uint8) #bool
aromatic = np.zeros((num_atom,1),np.uint8) #bool
hybridization = np.zeros((num_atom,len(HYBRIDIZATION)),np.uint8) #category
num_h = np.zeros((num_atom,1),np.float32) #real
atomic = np.zeros((num_atom,1),np.float32) #real
for i in range(num_atom):
atom = mol.GetAtomWithIdx(i)
symbol[i] = one_hot_encoding(atom.GetSymbol(),SYMBOL)
aromatic[i] = atom.GetIsAromatic()
hybridization[i] = one_hot_encoding(atom.GetHybridization(),HYBRIDIZATION)
num_h[i] = atom.GetTotalNumHs(includeNeighbors=True)
atomic[i] = atom.GetAtomicNum()
for t in range(0, len(feature)):
if feature[t].GetFamily() == 'Donor':
for i in feature[t].GetAtomIds():
donor[i] = 1
elif feature[t].GetFamily() == 'Acceptor':
for i in feature[t].GetAtomIds():
acceptor[i] = 1
## ** edge features **
num_edge = num_atom*num_atom - num_atom
edge_index = np.zeros((num_edge,2), np.uint8) # int tuples
bond_type = np.zeros((num_edge,len(BOND_TYPE)), np.uint8) #category
distance = np.zeros((num_edge,1),np.float32) #real
angle = np.zeros((num_edge,1),np.float32) #real
relative_angle = np.zeros((num_edge,1),np.float32) #real
norm_xyz = preprocessing.normalize(xyz, norm='l2')
ij=0
for i in range(num_atom):
for j in range(num_atom):
if i==j: continue
edge_index[ij] = [i,j]
bond = mol.GetBondBetweenAtoms(i, j)
if bond is not None:
bond_type[ij] = one_hot_encoding(bond.GetBondType(),BOND_TYPE)
distance[ij] = ((xyz[i] - xyz[j])**2).sum()**0.5
angle[ij] = (norm_xyz[i]*norm_xyz[j]).sum()
ij+=1
elif categorical_encoding =='label':
## ** node features **
num_atom = mol.GetNumAtoms()
symbol = np.zeros((num_atom,1),np.uint8) #category
acceptor = np.zeros((num_atom,1),np.uint8) #bool
donor = np.zeros((num_atom,1),np.uint8) #bool
aromatic = np.zeros((num_atom,1),np.uint8) #bool
hybridization = np.zeros((num_atom,1),np.uint8) #category
num_h = np.zeros((num_atom,1),np.float32) #real
atomic = np.zeros((num_atom,1),np.float32) #real
for i in range(num_atom):
atom = mol.GetAtomWithIdx(i)
symbol[i] = label_encoding(atom.GetSymbol(), SYMBOL)
aromatic[i] = atom.GetIsAromatic()
hybridization[i] = label_encoding(atom.GetHybridization(),HYBRIDIZATION)
num_h[i] = atom.GetTotalNumHs(includeNeighbors=True)
atomic[i] = atom.GetAtomicNum()
for t in range(0, len(feature)):
if feature[t].GetFamily() == 'Donor':
for i in feature[t].GetAtomIds():
donor[i] = 1
elif feature[t].GetFamily() == 'Acceptor':
for i in feature[t].GetAtomIds():
acceptor[i] = 1
## ** edge features **
num_edge = num_atom*num_atom - num_atom
edge_index = np.zeros((num_edge,2), np.uint8) # int tuples
bond_type = np.zeros((num_edge,1), np.uint8) #category
distance = np.zeros((num_edge,1),np.float32) #real
angle = np.zeros((num_edge,1),np.float32) #real
norm_xyz = preprocessing.normalize(xyz, norm='l2')
ij=0
for i in range(num_atom):
for j in range(num_atom):
if i==j: continue
edge_index[ij] = [i,j]
bond = mol.GetBondBetweenAtoms(i, j)
if bond is not None:
bond_type[ij] = label_encoding(bond.GetBondType(),BOND_TYPE)
distance[ij] = ((xyz[i] - xyz[j])**2).sum()**0.5
angle[ij] = (norm_xyz[i]*norm_xyz[j]).sum()
ij+=1
else :
        raise ValueError(f"'{categorical_encoding}' is not a valid categorical encoding (expected 'one_hot' or 'label')")
##---- Define the graph structure
graph = Struct(
molecule_name = molecule_name,
smiles = Chem.MolToSmiles(mol),
axyz = [a,xyz],
node = [symbol, acceptor, donor, aromatic, hybridization, num_h, atomic,],
edge = [bond_type, distance, angle],
edge_index = edge_index,
coupling = coupling,
)
return graph
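#--------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original pipeline): build the graph of a single
# molecule. 'dsgdb9nsd_000001' is one of the molecule names used in the checks further down,
# and load_csv() is defined later in this module; run_check_graph() below does a fuller check.
def _demo_make_graph():
    gb_structure, gb_scalar_coupling = load_csv()
    graph = make_graph('dsgdb9nsd_000001', gb_structure, gb_scalar_coupling,
                       categorical_encoding='one_hot', normalize_coupling=False)
    print(graph.molecule_name, graph.smiles, graph.edge_index.shape)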
#############################################################################################################
# #
# Load Champs Datasets #
# #
#############################################################################################################
def read_champs_xyz(xyz_file):
line = read_list_from_file(xyz_file, comment=None)
num_atom = int(line[0])
xyz=[]
symbol=[]
for n in range(num_atom):
l = line[1+n]
l = l.replace('\t', ' ').replace(' ', ' ')
l = l.split(' ')
symbol.append(l[0])
xyz.append([float(l[1]),float(l[2]),float(l[3]),])
return symbol, xyz
def mol_from_axyz(symbol, xyz):
charged_fragments = True
quick = True
charge = 0
atom_no = get_atomicNumList(symbol)
mol = xyz2mol(atom_no, xyz, charge, charged_fragments, quick)
return mol
def load_csv():
"""
load_csv --> load the GroupBy DataFrames (Grouping by molecule names)
"""
DATA_DIR = '/champs-2019/input'
#structure
df_structure = pd.read_csv(DATA_DIR + '/csv/structures.csv')
#coupling
df_train = pd.read_csv(DATA_DIR + '/csv/train_transform.csv')
df_test = pd.read_csv(DATA_DIR + '/csv/test.csv')
df_test['scalar_coupling_constant']=0
df_test['transform']=0
df_scalar_coupling = pd.concat([df_train,df_test])
df_scalar_coupling_contribution = pd.read_csv(DATA_DIR + '/csv/scalar_coupling_contributions.csv')
    df_scalar_coupling = pd.merge(df_scalar_coupling, df_scalar_coupling_contribution,
            how='left', on=['molecule_name','atom_index_0','atom_index_1','type'])
gb_scalar_coupling = df_scalar_coupling.groupby('molecule_name')
gb_structure = df_structure.groupby('molecule_name')
return gb_structure, gb_scalar_coupling
#############################################################################################################
# #
# Tests check . #
# #
#############################################################################################################
def run_check_xyz():
''' check xyz files '''
xyz_dir = '/champs-2019/input/structures'
name =[
'dsgdb9nsd_000001',
'dsgdb9nsd_000002',
'dsgdb9nsd_000005',
'dsgdb9nsd_000007',
'dsgdb9nsd_037490',
'dsgdb9nsd_037493',
'dsgdb9nsd_037494',
]
for n in name:
xyz_file = xyz_dir + '/%s.xyz'%n
symbol, xyz = read_champs_xyz(xyz_file)
mol = mol_from_axyz(symbol, xyz)
smiles = Chem.MolToSmiles(mol)
print(n, smiles)
image = np.array(Chem.Draw.MolToImage(mol,size=(128,128)))
image_show('',image)
cv2.waitKey(0)
def run_check_graph():
''' check graph construction '''
gb_structure, gb_scalar_coupling = load_csv()
molecule_name = 'dsgdb9nsd_000001'
normalize_coupling = False
    graph = make_graph(molecule_name, gb_structure, gb_scalar_coupling, normalize_coupling=normalize_coupling)
print('')
print(graph)
print('graph.molecule_name:', graph.molecule_name)
print('graph.smiles:', graph.smiles)
print('graph.node:', np.concatenate(graph.node,-1).shape)
print('graph.edge:', np.concatenate(graph.edge,-1).shape)
print('graph.edge_index:', graph.edge_index.shape)
print('-----')
print('graph.coupling.index:', graph.coupling.index.shape)
print('graph.coupling.type:', graph.coupling.type.shape)
print('graph.coupling.value:', graph.coupling.value.shape)
print('graph.coupling.contribution:', graph.coupling.contribution.shape)
print('graph.coupling.id:', graph.coupling.id)
print('')
exit(0)
zz=0
#############################################################################################################
# #
# Build graphs #
# #
#############################################################################################################
def do_one(p, categorical_encoding='one_hot', normalize_coupling=False):
    ''' Build and save the graph for one molecule; p packs (index, molecule_name, groupbys, graph_file path) '''
i, molecule_name, gb_structure, gb_scalar_coupling, graph_file = p
g = make_graph(molecule_name, gb_structure, gb_scalar_coupling, categorical_encoding, normalize_coupling)
print(i, g.molecule_name, g.smiles)
write_pickle_to_file(graph_file,g)
##----
def run_convert_to_graph(categorical_encoding='one_hot', normalize_coupling = False , graph_dir='/champs-2019/input/structure/graph1'):
'''
Convert Train and Test data to graph structures and save each graph as .pkl file in graph_dir path
'''
# graph_dir = '/champs-2019/input/structure/graph1'
os.makedirs(graph_dir, exist_ok=True)
gb_structure, gb_scalar_coupling = load_csv()
molecule_names = list(gb_scalar_coupling.groups.keys())
molecule_names = np.sort(molecule_names)
param=[]
for i, molecule_name in enumerate(molecule_names):
graph_file = graph_dir + '/%s.pickle'%molecule_name
p = (i, molecule_name, gb_structure, gb_scalar_coupling, graph_file)
if i<2000:
do_one(p, categorical_encoding, normalize_coupling)
else:
param.append(p)
if 1:
pool = mp.Pool(processes=16)
pool.map(partial(do_one, categorical_encoding=categorical_encoding, normalize_coupling=normalize_coupling), param)
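#--------------------------------------------------------------------------------------------
# Illustrative sketch: programmatic equivalent of the CLI entry point below, using the same
# default output directory as the function signature (an assumption about the local setup).
def _demo_build_all_graphs():
    run_convert_to_graph(categorical_encoding='one_hot',
                         normalize_coupling=False,
                         graph_dir='/champs-2019/input/structure/graph1')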
#############################################################################################################
# #
# Build Cross-Validation folds #
# #
#############################################################################################################
def run_make_split(folds):
'''
    Build cross-validation folds: each fold is represented by two .npy files of unique molecule names (train / validation split).
    Arguments:
        folds (int): number of validation folds
    Saves the train / valid .npy files with the corresponding molecule names.
'''
split_dir = '/champs-2019/input/split'
csv_file = '/champs-2019/input/csv/train.csv'
print('Read train data')
df = gd.read_csv(csv_file)
df['molecule_name_hash'] = df['molecule_name'].data.hash()
# get unique molecules
print('Get unique molecules names')
molecule_names = df['molecule_name'].unique().to_pandas().values
molecule_names = np.sort(molecule_names)
print('Create train / validation folds')
debug_split = molecule_names[:1000]
np.save(split_dir + '/debug_split_by_mol.%d.npy'%len(debug_split), debug_split)
print(debug_split[0:5]) #'dsgdb9nsd_001679'
for fold in range(folds):
print(fold)
mask = df['molecule_name_hash']%folds==fold
tr, va = df[~mask]['molecule_name'],df[mask]['molecule_name']
train_split = tr.unique().to_pandas().values
valid_split = va.unique().to_pandas().values
np.save(split_dir + '/train_split_by_mol_hash.%d.npy'%(fold),train_split)
np.save(split_dir + '/valid_split_by_mol_hash.%d.npy'%(fold),valid_split)
pass
#############################################################################################################
# #
# main program #
# #
#############################################################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build graph and cross-validation data')
parser.add_argument('--cv', default=False, action ='store_true', help='whether to build cv npy folds or not')
parser.add_argument('--folds', type=int, help='number of validation folds')
parser.add_argument('--categorical_encoding', type=str, help='How to encode categorical values: "one_hot" vs "label"' )
parser.add_argument('--graph_dir', type=str, help='output dir for saving the graph structure of all the molecules')
parser.add_argument('--normalize', default=False, action ='store_true', help='whether to normalize couplings')
    parser.add_argument('--ranktransform', default=False, action ='store_true', help='whether to compute the rank-based normal (GaussRank) transform of the coupling values')
args = parser.parse_args()
print( '%s: calling main function ... ' % os.path.basename(__file__))
# test the graph structure : run_check_graph()
if args.cv:
# Build cv folds
run_make_split(args.folds)
# Convert data to graphs
run_convert_to_graph(args.categorical_encoding, args.normalize, args.graph_dir)
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/common_model.py | from mpnn_model.common import *
from torch_scatter import *
from torch_geometric.utils import scatter_
import torch
import torch.nn as nn
import torch.nn.functional as F
import numbers
# Fast ai
from fastai.tabular import *
from fastai.callbacks import SaveModelCallback
__all__ = ['LinearBn', 'MlpBn', 'CustomTabularModel', 'get_node_encoder', 'get_edge_encoder',]
#############################################################################################################
# #
# Linear batch-norm layers #
# #
#############################################################################################################
class LinearBn(nn.Module):
'''
Batch norm dense layer
Arguments:
- in_channel: int, Input dimension
- out_channel: int, Output dimension
        - act: str, Activation function to apply to the output of batch normalization.
'''
def __init__(self, in_channel, out_channel, act=None):
super(LinearBn, self).__init__()
self.linear = nn.Linear(in_channel, out_channel, bias=False)
self.bn = nn.BatchNorm1d(out_channel, eps=1e-05, momentum=0.1)
if act is not None :
self.act = F.__dict__[act]
else:
self.act = act
def forward(self, x):
x = self.linear(x)
if self.bn is not None:
x = self.bn(x)
if self.act is not None:
x = self.act(x)
return x
class MlpBn(nn.Module):
    ''' Fully connected feed-forward neural network: stacked batch-norm layers with dropout
    Args:
        input_dim (int): the dimension of the input
        dimensions (list of int): the dimensions of the hidden layers.
        activation (str): Activation function to apply to the output of each layer.
        dropout (float): the dropout probability to apply to each layer.
'''
def __init__(self,
input_dim,
dimensions,
                 activation='relu',
dropout=0.):
super(MlpBn, self).__init__()
self.input_dim = input_dim
self.dimensions = dimensions
self.activation = activation
self.dropout = dropout
# Modules
self.linears = nn.ModuleList([LinearBn(input_dim, dimensions[0], act=activation)])
for din, dout in zip(dimensions[:-1], dimensions[1:]):
self.linears.append(LinearBn(din, dout, act=self.activation))
def forward(self, x):
for i,lin in enumerate(self.linears):
x = lin(x)
if self.dropout > 0:
x = F.dropout(x, self.dropout, training=self.training)
return x
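#--------------------------------------------------------------------------------------------
# Minimal usage sketch (synthetic input, assumed sizes): LinearBn chains Linear -> BatchNorm1d
# -> activation; note that `act` must be a lowercase torch.nn.functional name such as 'relu'.
def _demo_linear_bn():
    layer = LinearBn(16, 32, act='relu')
    x = torch.randn(8, 16)
    return layer(x).shape  # torch.Size([8, 32])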
#############################################################################################################
# #
# Tabular model #
# #
#############################################################################################################
class CustomTabularModel(nn.Module):
"Basic model for tabular data."
def __init__(self, emb_szs:ListSizes, n_cont:int, out_sz:int, layers:Collection[int], ps:Collection[float]=None,
emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, bn_final:bool=False):
super().__init__()
ps = ifnone(ps, [0]*len(layers))
ps = listify(ps, layers)
#self.bsn = BatchSwapNoise(0.15)
self.embeds = nn.ModuleList([embedding(ni, nf) for ni,nf in emb_szs])
self.emb_drop = nn.Dropout(emb_drop)
self.bn_cont = nn.BatchNorm1d(n_cont)
n_emb = sum(e.embedding_dim for e in self.embeds)
self.n_emb,self.n_cont,self.y_range = n_emb,n_cont,y_range
sizes = self.get_sizes(layers, out_sz)
actns = [nn.ReLU(inplace=True) for _ in range(len(sizes)-2)] + [None]
layers = []
for i,(n_in,n_out,dp,act) in enumerate(zip(sizes[:-1],sizes[1:],[0.]+ps,actns)):
layers += bn_drop_lin(n_in, n_out, bn=use_bn and i!=0, p=dp, actn=act)
if bn_final: layers.append(nn.BatchNorm1d(sizes[-1]))
layers = layers[:-2]
self.layers = nn.Sequential(*layers)
def get_sizes(self, layers, out_sz):
return [self.n_emb + self.n_cont] + layers + [out_sz]
def forward(self, x_cat:Tensor, x_cont:Tensor) -> Tensor:
#self.bsn(x_cat)
if self.n_emb != 0:
x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
x = torch.cat(x, 1)
x = self.emb_drop(x)
if self.n_cont != 0:
x_cont = self.bn_cont(x_cont)
x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont
x = self.layers(x)
return x
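#--------------------------------------------------------------------------------------------
# Usage sketch (assumed sizes, synthetic input): two categorical columns embedded to 4 dims
# each plus one continuous column. Note that `layers = layers[:-2]` above trims the final
# out_sz layer, so the module outputs the last hidden dimension (64 here), which is how the
# encoders below use it as a feature extractor.
def _demo_tabular_model():
    model = CustomTabularModel(emb_szs=[(6, 4), (5, 4)], n_cont=1, out_sz=2, layers=[64], ps=[0.])
    x_cat = torch.randint(0, 5, (32, 2))
    x_cont = torch.randn(32, 1)
    return model(x_cat, x_cont).shape  # torch.Size([32, 64])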
#############################################################################################################
# #
# Node and edge encoders #
# #
#############################################################################################################
def get_node_encoder(encoding, emb_sz, n_cont, node_dim, layers, activation, dropout=0.):
'''
    Get the MLP network that processes node features and builds the node representation
'''
if encoding == 'one_hot':
return MlpBn(node_dim, dimensions=layers, activation=activation, dropout=dropout)
elif encoding== 'label':
# embed symbol, acceptor, donor, aromatic, hybridization
# emb_sz = [(6,4), (3,3), (3,3), (3,3), (5,4)]
return CustomTabularModel(emb_szs = emb_sz, out_sz=2, n_cont=n_cont, layers=layers, ps=[dropout], emb_drop=0.)
def get_edge_encoder(encoding, emb_sz, n_cont, node_dim, edge_dim, layers, activation, dropout=0.):
'''
Get the MLP network to process edges features and build matrix representation
Arguments:
- encoding: str, the encoding of categorical variables : "label" vs "one_hot"
- emb_sz: list of tuples, the embedding size of each categorical variable
        - n_cont: int, the number of continuous variables
- node_dim: int, the dimension of node's representation
- edge_dim: int, the input dimension of edge's features
- layers: list of int, the dimensions of hidden layers
- activation: str, the activation to apply for layers.
- dropout: [float], dropout of each hidden layer.
'''
if encoding == 'one_hot':
return MlpBn(edge_dim, dimensions=layers+[node_dim*node_dim], activation=activation, dropout=dropout)
elif encoding== 'label':
# emb_sz = [(5,8)]
return CustomTabularModel(emb_szs = emb_sz, n_cont=n_cont , out_sz=2, layers=layers+[node_dim*node_dim], ps=[dropout], emb_drop=0.)
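#--------------------------------------------------------------------------------------------
# Illustrative sketch (assumed dimensions, synthetic tensors): with one-hot inputs the node
# encoder maps raw node features to a node_dim-sized representation, and the edge encoder
# emits a flattened node_dim x node_dim transition matrix per edge, as consumed by the
# message-passing step.
def _demo_encoders():
    node_enc = get_node_encoder('one_hot', emb_sz=None, n_cont=0, node_dim=7,
                                layers=[64], activation='relu', dropout=0.)
    edge_enc = get_edge_encoder('one_hot', emb_sz=None, n_cont=0, node_dim=64,
                                edge_dim=5, layers=[256], activation='relu', dropout=0.)
    node = torch.randn(10, 7)
    edge = torch.randn(30, 5)
    h = node_enc(node)                    # (10, 64) node representations
    a = edge_enc(edge).view(-1, 64, 64)   # (30, 64, 64) per-edge matrices
    return h.shape, a.shape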
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/GaussRank.py | import numpy as np
from scipy.special import erfinv
from bisect import bisect_left
import pandas as pd
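#--------------------------------------------------------------------------------------------
# Illustrative round-trip sketch for the GaussRankMap class defined below (synthetic values,
# not taken from the competition files): fit the per-type rank-gauss mapping on a tiny frame,
# then map predictions expressed in gaussrank space back to scalar-coupling values.
def _demo_gaussrank_roundtrip():
    df = pd.DataFrame({
        'type': ['1JHC'] * 5 + ['2JHH'] * 5,
        'scalar_coupling_constant': list(np.linspace(70, 200, 5)) + list(np.linspace(-30, 10, 5)),
    })
    grm = GaussRankMap()
    transformed = grm.fit_training(df)                      # rank-gauss values, indexed like df
    preds = pd.DataFrame({'type': df['type'], 'prediction': transformed})
    restored = grm.convert_df(preds, from_coupling=False)   # back to the coupling scale
    return transformed, restored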
class GaussRankMap():
def __init__(self, training_maps=[], coupling_order=[]):
self.epsilon = 0.001
self.lower = -1 + self.epsilon
self.upper = 1 - self.epsilon
self.range = self.upper - self.lower
self.training_maps = training_maps
self.coupling_order = coupling_order
def fit_training(self, df, reset=False):
if self.training_maps and reset == True:
self.training_maps = []
self.coupling_order = []
elif self.training_maps:
            print('GaussRank Mapping already exists. To override set reset=True.')
return
tf = None
for coupling_type in df['type'].unique():
self.coupling_order.append(coupling_type)
X = df[df['type']==coupling_type]['scalar_coupling_constant']
i = np.argsort(X, axis=0)
j = np.argsort(i, axis=0)
assert (j.min() == 0).all()
assert (j.max() == len(j) - 1).all()
j_range = len(j) - 1
self.divider = j_range / self.range
transformed = j / self.divider
transformed = transformed - self.upper
transformed = erfinv(transformed)
#print(coupling_type, len(X), len(transformed))
if tf is None:
tf = transformed.copy(deep=True)
else:
tf = tf.append(transformed.copy(deep=True))
training_map = pd.concat([X, transformed], axis=1)
training_map.columns=['sc','sct']
training_map.sort_values(['sc'], ascending=[1], inplace=True)
training_map.reset_index(inplace=True, drop=True)
self.training_maps.append(training_map)
return tf
def convert_df(self, df, from_coupling=True):
#coupling_idx = self.coupling_order.index(coupling_type)
if from_coupling==True:
column = 'sc'
target = 'sct'
df_column = 'scalar_coupling_constant'
else:
column = 'sct'
target = 'sc'
df_column = 'prediction'
output = None
# Do all of the sorts per coupling type in a single operation
for coupling_type in df['type'].unique():
training_map = self.training_maps[self.coupling_order.index(coupling_type)]
#training_map = cudf.DataFrame.from_pandas(self.training_maps[self.coupling_order.index(coupling_type)])
pos = training_map[column].searchsorted(df[df['type']==coupling_type][df_column], side='left')
pos[pos>=len(training_map)] = len(training_map)-1
pos[pos-1<=0] = 0
x1 = training_map[column].iloc[pos].reset_index(drop=True)
            x2 = training_map[column].iloc[pos-1].reset_index(drop=True) # smaller of the two (floor value)
y1 = training_map[target].iloc[pos].reset_index(drop=True)
y2 = training_map[target].iloc[pos-1].reset_index(drop=True)
z = df[df['type']==coupling_type].reset_index(drop=False)[['index',df_column]]
relative = z['index'],(z[df_column]-x2) / (x1-x2)
if output is None:
output = pd.DataFrame(list(zip(relative[0],((1-relative[1])*y2 + (relative[1]*y1)))))
else:
output = output.append(pd.DataFrame(list(zip(relative[0],((1-relative[1])*y2 + (relative[1]*y1))))))
output.columns = ['index',target]
output = output.set_index('index', drop=True)
# output = output.sort_index()
# < min or > max
return output #pd.DataFrame(list(zip(relative[0],((1-relative[1])*y2 + (relative[1]*y1))))) | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/dataset.py | #
# coupling_cols = ['atom_index_0', 'atom_index_1','coupling_type','scalar_coupling',
# 'gaussrank_coupling','fc','sd','pso','dso','id',]
#
# edge_cols : ['atom_index_0', 'atom_index_1', 'edge_type', 'distance', 'angle' ]
#
# nodes cols : ['symbol','acceptor', 'donor', 'aromatic', 'hybridization', 'num_h', 'atomic']
#
###################################################
from mpnn_model.common import *
import torch
from torch import _utils
from fastai.torch_core import to_device
import torch.nn.functional as F
from fastai.basic_data import DataBunch
from fastai.basic_data import *
from fastai.tabular import *
from fastai import *
import copy
#EDGE_DIM = 6
#NODE_DIM = 13 ## 93 13
NUM_TARGET = 8 ## for the 8 coupling types
NODE_MAX, EDGE_MAX, COUPLING_MAX = 32, 816, 136
DATA_DIR = '/rapids/notebooks/srabhi/champs-2019/input'
# __all__ = ['TensorBatchDataset', 'tensor_collate', 'BatchGraphDataset', 'null_collate',
# 'BatchDataLoader', '_BatchDataLoaderIter', 'BatchDataBunch' ]
#############################################################################################################
# #
# Load batch of tensors #
# #
#############################################################################################################
class BatchDataset(object):
"""An abstract class representing a Batch Dataset.
All other datasets should subclass this. All subclasses should override
``__len__``, which provides the size of the dataset, ``__getitem__``,
supporting integer indexing of batches in range from 0 to len(self)//batchsize exclusive,
and ``shuffle`` which randomly shuffles the data, generally called per epoch.
Batch datasets are meant to be iterated over in order rather than randomly accessed
so the randomization has to happen first.
"""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __add__(self):
raise NotImplementedError
def shuffle(self):
raise NotImplementedError
#############################################################################################################
# #
# Batch dataset #
# #
#############################################################################################################
class TensorBatchDataset(BatchDataset):
"""Batch Dataset wrapping Tensors.
Args:
*tensors (Tensor): tensors that have the same size of the first dimension.
6 tensors are needed:
batch_node, batch_edge, batch_coupling,
batch_num_node, batch_num_edge, batch_num_coupling
batch_size: The size of the batch to return
pin_memory (bool, optional): If ``True``, the dataset will be pinned memory for faster copy to GPU.
I saw no performance improvement to doing so but results may vary.
COUPLING_MAX: dimension of molecule coupling features vector
        mode: ['train', 'test']: when mode == 'test', also return an additional info vector with the coupling observation ids
csv: ['train', 'test']: source of data
Method __getitem__ returns:
2 modes:
'train' : (node, edge_feats, edge_index, node_index, coupling_index), targets
'test' : (node, edge_feats, edge_index, node_index, coupling_index), targets, infor
        It calls the provided collate function (collate_fn) in order to:
            - Remove padded values from the tensors: batch_node, batch_edge, batch_coupling, batch_gaussrank,
            - Re-arrange data into X / targets
            - Create the index matrices edge_index and node_index to keep track of the variable sizes of molecule graphs.
"""
def __init__(self, molecule_names, tensors, collate_fn, batch_size=1, pin_memory=False, COUPLING_MAX=136, mode = 'train', csv='train'):
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
self.tensors = tensors
self.batch_size = batch_size
self.num_samples = tensors[0].size(0)
self.mode = mode
self.csv = csv
self.molecule_names = molecule_names
self.COUPLING_MAX = COUPLING_MAX
self.collate_fn = collate_fn
if pin_memory:
for tensor in self.tensors:
tensor.pin_memory()
def __len__(self):
if self.num_samples % self.batch_size == 0:
return self.num_samples // self.batch_size
else:
return self.num_samples // self.batch_size + 1
def __getitem__(self, item):
idx = item * self.batch_size
# Need to handle odd sized batches if data isn't divisible by batchsize
if idx < self.num_samples and (
idx + self.batch_size < self.num_samples or self.num_samples % self.batch_size == 0):
batch_data = [tensor[idx:idx + self.batch_size] for tensor in self.tensors]
elif idx < self.num_samples and idx + self.batch_size > self.num_samples:
batch_data = [tensor[idx:] for tensor in self.tensors]
else:
raise IndexError
return self.collate_fn(batch_data, self.batch_size, self.COUPLING_MAX, self.mode)
def __add__(self, tensors):
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
assert len(self.tensors) == len(tensors)
assert all(self_tensor[0].shape == tensor[0].shape for self_tensor, tensor in zip(self.tensors, tensors))
num_add_samples = tensors[0].size(0)
self.num_samples = self.num_samples + num_add_samples
self.tensors = [torch.cat((self_tensor, tensor)) for self_tensor, tensor in zip(self.tensors, tensors)]
def shuffle_max(self):
num_nodes = self.tensors[4] #num nodes
# sort tensors w.r.t the number of nodes in each molecule: Get larger ones first
sort_id = num_nodes.argsort(descending=True)
# Compute the first batch
first_batch_id = sort_id[:self.batch_size]
# Shuffle the rest of indices
idx = sort_id[self.batch_size:][torch.randperm(self.num_samples-self.batch_size, dtype=torch.int64, device='cuda')]
final_idx = torch.cat([first_batch_id, idx])
#print(final_idx.shape)
self.tensors = [tensor[final_idx] for tensor in self.tensors]
def shuffle(self):
idx = torch.randperm(self.num_samples, dtype=torch.int64, device='cuda')
self.tensors = [tensor[idx] for tensor in self.tensors]
def get_total_samples(self):
"""
        Update the dataset's total sample count with the total number of coupling observations
Returns:
mask : the cv mask used to select a group of molecule names
"""
self.df = pd.read_csv(DATA_DIR + '/csv/%s.csv'%self.csv)
mask = self.df['molecule_name'].isin(self.molecule_names)
self.total_samples = self.df[mask].shape[0]
return mask
#############################################################################################################
# #
# batch loader #
# #
#############################################################################################################
class BatchDataLoader(object):
"""Batch Data loader. Takes in a batch dataset and returns iterators that return whole batches of data.
Arguments:
dataset (BatchDataset): dataset from which to load the data.
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
pin_memory (bool, optional): If ``True``, the data loader will copy tensors
into CUDA pinned memory before returning them.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
        device: str, device on which batches are returned (default: 'cuda')
"""
def __init__(self, batchdataset, shuffle=False, max_first=False,
pin_memory=False, drop_last=False, device='cuda'):
self.batch_size = batchdataset.batch_size
self.dataset = batchdataset
self.shuffle = shuffle
self.max_first = max_first
self.pin_memory = pin_memory
self.drop_last = drop_last
self.device = device
def __iter__(self):
return _BatchDataLoaderIter(self)
def __len__(self):
if self.drop_last and self.dataset.num_samples%self.batch_size != 0:
return len(self.dataset)-1
else:
return len(self.dataset)
class _BatchDataLoaderIter(object):
"""Iterates once over the BatchDataLoader's batchdataset, shuffling if requested"""
def __init__(self, loader):
self.batchdataset = loader.dataset
self.batch_size = loader.batch_size
self.pin_memory = loader.pin_memory and torch.cuda.is_available()
self.drop_last = loader.drop_last
self.device = loader.device
if loader.max_first:
self.batchdataset.shuffle_max()
elif loader.shuffle:
self.batchdataset.shuffle()
self.idx = 0
def __len__(self):
if self.drop_last and self.batchdataset.num_samples%self.batch_size != 0:
return len(self.batchdataset)-1
else:
return len(self.batchdataset)
def __next__(self):
if self.idx >= len(self):
raise StopIteration
if self.batchdataset.mode == 'test':
X, y, infor = self.batchdataset[self.idx]
batch = (X, y)
else:
batch = self.batchdataset[self.idx]
# Note Pinning memory was ~10% _slower_ for the test examples I explored
if self.pin_memory:
batch = _utils.pin_memory.pin_memory_batch(batch)
self.idx = self.idx+1
# move the batch data to device
batch = to_device(batch, self.device)
# return in the form of : xb,yb = (x_cat, x_cont), y
if self.batchdataset.mode == 'test':
return batch, infor
return batch
next = __next__ # Python 2 compatibility
def __iter__(self):
return self
#############################################################################################################
# #
# Fastai DataBunch #
# #
#############################################################################################################
class BatchDataBunch(DataBunch):
@classmethod
def remove_tfm(cls, tfm:Callable)->None:
"Remove `tfm` from `self.tfms`."
if tfm in cls.tfms: cls.tfms.remove(tfm)
@classmethod
def add_tfm(cls,tfm:Callable)->None:
"Add `tfm` to `self.tfms`."
cls.tfms.append(tfm)
@classmethod
def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', bs:int=64, val_bs=None,
num_workers:int=defaults.cpus, device:torch.device=None,
collate_fn:Callable=data_collate, tfms: List[Callable]=None,
size:int=None, **kwargs)->'BatchDataBunch':
cls.tfms = listify(tfms)
val_bs = ifnone(val_bs, bs)
datasets = [train_ds, valid_ds]
if valid_ds is not None:
cls.empty_val = False
else:
cls.empty_val = True
datasets.append(test_ds)
cls.device = defaults.device if device is None else device
dls = [BatchDataLoader(d, shuffle=s, max_first=s, pin_memory=False, drop_last=False, device=cls.device) for d,s in
zip(datasets,(True,False,False)) if d is not None]
cls.path = path
cls.dls = dls
assert not isinstance(dls[0],DeviceDataLoader)
# load batch in device
if test_ds is not None:
cls.train_dl, cls.valid_dl, cls.test_dl = dls
else:
cls.train_dl, cls.valid_dl = dls
cls.path = Path(path)
return cls
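#--------------------------------------------------------------------------------------------
# Wiring sketch (the six padded tensors and molecule-name arrays are assumed to be prepared
# upstream, exactly in the order documented in TensorBatchDataset): compose dataset, loader
# and fastai DataBunch.
def _demo_databunch(train_names, train_tensors, valid_names, valid_tensors):
    from mpnn_model.data_collate import tensor_collate_rnn
    train_ds = TensorBatchDataset(train_names, train_tensors, tensor_collate_rnn,
                                  batch_size=64, COUPLING_MAX=136, mode='train', csv='train')
    valid_ds = TensorBatchDataset(valid_names, valid_tensors, tensor_collate_rnn,
                                  batch_size=64, COUPLING_MAX=136, mode='train', csv='train')
    return BatchDataBunch.create(train_ds, valid_ds, device='cuda', bs=64)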
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/message_passing.py | from mpnn_model.common_model import *
from mpnn_model.common import *
from torch_scatter import *
from torch_geometric.utils import scatter_
import torch
import torch.nn as nn
import torch.nn.functional as F
import numbers
__all__ = ['message_pass' , 'MessagePassing', 'GRUUpdate', 'Set2Set']
#############################################################################################################
# #
# MPNN- PHASE1 : Message Passing #
# #
#############################################################################################################
def message_pass(node_states, edge_index, a_in):
"""Computes a_t from h_{t-1}, see bottom of page 3 in the paper.
a_t = sum_w A(e_vw) . h^t
Args:
node_states: [batch_size*num_nodes, node_dim] tensor (h_{t-1})
a_in (torch.float32): [batch_size*num_nodes, node_dim, node_dim]: Encoded edge matrix
edge_index [batch_size*num_edges, 2]: the indices of edges
Returns:
messages (torch.float32): [batch_size*num_nodes, node_dim] For each pair
of nodes in the graph a message is sent along both the incoming edge.
"""
num_node, node_dim = node_states.shape
edge_index = edge_index.t().contiguous()
x_i = torch.index_select(node_states, 0, edge_index[0])
message = torch.matmul( x_i.view(-1,1,node_dim), a_in).view(-1, node_dim)
message = scatter_('mean', message, edge_index[1], dim_size=num_node)
return message
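#--------------------------------------------------------------------------------------------
# Shape sketch (synthetic CPU tensors): 4 fully connected nodes and a random per-edge
# transition matrix, standing in for the output of MessagePassing._pre_encode_edges.
def _demo_message_pass():
    num_node, node_dim = 4, 8
    node_states = torch.randn(num_node, node_dim)
    edge_index = torch.tensor([[i, j] for i in range(num_node) for j in range(num_node) if i != j])
    a_in = torch.randn(edge_index.shape[0], node_dim, node_dim)
    return message_pass(node_states, edge_index, a_in).shape  # torch.Size([4, 8])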
class MessagePassing(nn.Module):
'''
A feed forward neural network is applied to each edge in the adjacency matrix,
which is assumed to be vector valued. It maps the edge vector to a
node_dim x node_dim matrix, denoted NN(e). The message from node v -> w is
then NN(e) h_v. This is a generalization of the message function in the
GG-NN paper, which embeds the discrete edge label as a matrix.
'''
def __init__(self, ConfigParams):
'''
'''
super(MessagePassing, self).__init__()
self.encoding = ConfigParams['model']['mpnn']['node_encoder']['encoding']
self.edge_encoder = get_edge_encoder(**ConfigParams['model']['mpnn']['edge_encoder'])
self.node_dim = ConfigParams['model']['mpnn']['edge_encoder']['node_dim']
self.device = ConfigParams['train']['device']
if self.device == 'cuda':
self.bias = nn.Parameter(torch.Tensor(self.node_dim)).cuda()
else:
self.bias = nn.Parameter(torch.Tensor(self.node_dim))
self.bias.data.uniform_(-1.0 / math.sqrt(self.node_dim), 1.0 / math.sqrt(self.node_dim))
self._a_in = []
def _pre_encode_edges(self, edge):
'''
Args:
edge: [batch_size*num_edges, edge_dim] edge features
Return:
            A neural representation of the edge features where each edge vector is represented as
            a matrix of shape node_dim x node_dim
'''
if self.encoding == 'label':
edge_cat = edge[:, 0].long().view(-1,1)
edge_cont = edge[:, 1:].float()
edge = self.edge_encoder(edge_cat, edge_cont).view(-1,self.node_dim,self.node_dim)
elif self.encoding == 'one_hot':
edge = self.edge_encoder(edge).view(-1, self.node_dim, self.node_dim)
self._a_in = edge
def forward(self, node_states, edge_index, edge, reuse_graph_tensors=True):
'''
Args:
node_states: [batch_size*num_nodes, node_dim] tensor (h_{t-1})
            edge: [batch_size*num_edges, edge_dim] edge features (torch.float32)
reuse_graph_tensors: Boolean to indicate whether or not the self._a_in
should be reused or not. Should be set to False on first call, and True
on subsequent calls.
Returns:
            message_t: [batch_size * num_nodes, node_dim] the node representations
            after a single propagation step
'''
if not reuse_graph_tensors:
self._pre_encode_edges(edge)
new_state = message_pass(node_states, edge_index, self._a_in)
return F.relu(new_state + self.bias)
#############################################################################################################
# #
#                                 MPNN- PHASE2 :  Update node states                                        #
# #
#############################################################################################################
class GRUUpdate(nn.Module):
def __init__(self, ConfigParams):
super(GRUUpdate, self).__init__()
self.node_dim = ConfigParams['model']['mpnn']['edge_encoder']['node_dim']
self.gru = nn.GRU(self.node_dim, self.node_dim, batch_first=False, bidirectional=False)
def forward(self, messages, node_states):
"""Build the fprop graph.
Args:
node_states: [batch_size*num_nodes, node_dim] tensor (h_{t-1})
messages: [batch_size*num_nodes, node_dim] (a_t from the GGNN paper)
Returns:
updated_states: [batch_size*num_nodes, node_dim]
"""
num_node, node_dim = node_states.shape
update, _ = self.gru(messages.view(1,-1,self.node_dim),
node_states.view(1,num_node,-1))
return update.view(-1,node_dim)
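#--------------------------------------------------------------------------------------------
# Minimal sketch (assumed single-entry config): one GRU update step over 4 node states of
# dimension 8; messages and node states come out with the same shape they went in.
def _demo_gru_update():
    cfg = {'model': {'mpnn': {'edge_encoder': {'node_dim': 8}}}}
    update_fn = GRUUpdate(cfg)
    node_states = torch.randn(4, 8)
    messages = torch.randn(4, 8)
    return update_fn(messages, node_states).shape  # torch.Size([4, 8])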
#############################################################################################################
# #
# MPNN- PHASE3 : Readout function #
# #
#############################################################################################################
class Set2Set(torch.nn.Module):
def softmax(self, x, index, num=None):
x = x - scatter_max(x, index, dim=0, dim_size=num)[0][index]
x = x.exp()
x = x / (scatter_add(x, index, dim=0, dim_size=num)[index] + 1e-16)
return x
def __init__(self, in_channel, processing_step=1, num_layer = 1, batch_size=32):
super(Set2Set, self).__init__()
out_channel = 2 * in_channel
self.processing_step = processing_step
self.batch_size = batch_size
self.in_channel = in_channel
self.out_channel = out_channel
self.num_layer = num_layer
self.lstm = torch.nn.LSTM(out_channel, in_channel, num_layer)
self.lstm.reset_parameters()
def forward(self, x, batch_index):
h = (x.new_zeros((self.num_layer, self.batch_size, self.in_channel)),
x.new_zeros((self.num_layer, self.batch_size, self.in_channel)))
# zeros of shape: bs x 2*node_dim : init q_star
q_star = x.new_zeros(self.batch_size, self.out_channel)
# n readout steps
for i in range(self.processing_step):
# read from memory
q, h = self.lstm(q_star.unsqueeze(0), h)
q = q.view(self.batch_size, -1)
#energies : dot product between input_set and q
e = (x * q[batch_index]).sum(dim=-1, keepdim=True) #shape = num_node x 1
# Compute attention
a = self.softmax(e, batch_index, num=self.batch_size) #shape = num_node x 1
#compute readout
r = scatter_add(a * x, batch_index, dim=0, dim_size=self.batch_size) #apply attention #shape = batch_size x ...
#update q_star
q_star = torch.cat([q, r], dim=-1)
# print(q_star.shape)
return q_star
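#--------------------------------------------------------------------------------------------
# Usage sketch (synthetic CPU tensors): pool 10 node vectors belonging to 2 molecules into one
# readout vector of size 2*in_channel per molecule.
def _demo_set2set():
    s2s = Set2Set(in_channel=8, processing_step=2, batch_size=2)
    x = torch.randn(10, 8)
    batch_index = torch.tensor([0] * 6 + [1] * 4)
    return s2s(x, batch_index).shape  # torch.Size([2, 16])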
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/common_constants.py | import torch
### Helpers for normalization
NUM_COUPLING_TYPE=8
COUPLING_TYPE_STATS=[
#type #mean, std, min, max
'1JHC', 94.9761528641869, 18.27722399839607, 66.6008, 204.8800,
'2JHC', -0.2706244378832, 4.52360876732858, -36.2186, 42.8192,
'3JHC', 3.6884695895355, 3.07090647005439, -18.5821, 76.0437,
'1JHN', 47.4798844844683, 10.92204561670947, 24.3222, 80.4187,
'2JHN', 3.1247536134185, 3.67345877025737, -2.6209, 17.7436,
'3JHN', 0.9907298624944, 1.31538940138001, -3.1724, 10.9712,
'2JHH', -10.2866051639817, 3.97960190019757, -35.1761, 11.8542,
'3JHH', 4.7710233597359, 3.70498129755812, -3.0205, 17.4841,
]
NUM_COUPLING_TYPE = len(COUPLING_TYPE_STATS)//5
COUPLING_TYPE = [ COUPLING_TYPE_STATS[i*5 ] for i in range(NUM_COUPLING_TYPE)]
REVERSE_COUPLING_TYPE = dict(zip(range(8), COUPLING_TYPE))
COUPLING_TYPE_MEAN = torch.tensor([COUPLING_TYPE_STATS[i*5+1] for i in range(NUM_COUPLING_TYPE)], dtype=torch.float32).cuda()
COUPLING_TYPE_STD = torch.tensor([ COUPLING_TYPE_STATS[i*5+2] for i in range(NUM_COUPLING_TYPE)], dtype=torch.float32).cuda()
COUPLING_MIN_ = [ COUPLING_TYPE_STATS[i*5+3 ] for i in range(NUM_COUPLING_TYPE)]
COUPLING_MAX_ = [ COUPLING_TYPE_STATS[i*5+4 ] for i in range(NUM_COUPLING_TYPE)]
NODE_MAX, EDGE_MAX = 32, 816
COUPLING_MAX_DICT = {'1JHC': 20, '2JHC': 36, '3JHC': 66, '1JHN': 8, '2JHN': 12, '3JHN': 18, '3JHH': 36, '2JHH': 19 }
#--- Set of Categorical modalities
SYMBOL = ['H', 'C', 'N', 'O', 'F']
# model criterion
model_dict = { '1JHC': 'lmae', '2JHC': 'lmae', '3JHC': 'lmae', '3JHH': 'lmae',
'1JHN': 'mlmae' , '2JHN':'mlmae' , '3JHN':'mlmae', '2JHH':'mlmae'}
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/build_predictions.py | import os
from datetime import datetime
from functools import partial
from timeit import default_timer as timer
from time import time
import warnings
warnings.filterwarnings("ignore")
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.train_loss import lmae_criterion
from mpnn_model.callback import *
from time import time
#############################################################################################################
# #
# Get prediction function #
# #
#############################################################################################################
def do_test(net, test_loader, test_len, num_output, predict_type, grm, normalize=False, gaussrank=False):
"""
    Evaluate the network on the test loader and return (losses, predictions frame, contributions, molecule representations).
    Input arguments:
        net (nn.Module): the graph neural network model
        test_loader (DataLoader): test data loader
        test_len (int): length of the test dataset
        num_output (int): 1 for scalar coupling only, 5 when the 4 contributions are also predicted
        predict_type (bool): whether the model outputs one prediction per coupling type
        grm (GaussRankMap): fitted rank-gauss mapping used to invert gaussrank predictions
        normalize (bool): whether predictions/targets were normalized per type (mean/std)
        gaussrank (bool): whether predictions are in gaussrank space and must be mapped back
"""
test_num = 0
test_predict = []
test_coupling_type = []
test_coupling_value = []
test_id = []
test_contributions = []
molecule_representation = []
num_batches = 0
test_loss = 0
start = timer()
for b, (((node, edge, edge_index, node_index, coupling_index, type_, atomic), targets), infor) in enumerate(test_loader):
net.eval()
with torch.no_grad():
coupling_value = targets[0]
predict = net(node, edge, edge_index, node_index, coupling_index, type_, atomic)
if predict_type:
predict = torch.gather(predict[0], 1, targets[3].unsqueeze(1)).view(-1)
predict = [predict, [], []]
if normalize:
coupling_mean = torch.gather(COUPLING_TYPE_MEAN, 0, targets[3])
coupling_std = torch.gather(COUPLING_TYPE_STD, 0, targets[3])
predict = (predict * coupling_std) + coupling_mean
coupling_value = (coupling_value * coupling_std) + coupling_mean
predict = [predict, [], []]
loss = lmae_criterion(predict, coupling_value, coupling_value, [], [])
batch_size = test_loader.batch_size
test_id.extend(list(infor.data.cpu().numpy()))
test_predict.append(predict[0].data.cpu().numpy())
molecule_representation.append(net.pool.data.cpu().numpy())
test_coupling_type.append(coupling_index[:,-2].data.cpu().numpy())
test_coupling_value.append(coupling_value.data.cpu().numpy())
test_loss += loss.item()*batch_size
test_num = len(test_id)
num_batches += batch_size
print('\r %8d/%8d %0.2f %s'%( test_num, test_len, test_num/test_len,
time_to_str(timer()-start,'min')),end='',flush=True)
pass
test_loss = test_loss/num_batches
print('\n')
print('predict')
predict = np.concatenate(test_predict)
if num_output==5:
contributions = np.concatenate(test_contributions)
else:
contributions = []
test_coupling_value = np.concatenate(test_coupling_value)
test_coupling_type = np.concatenate(test_coupling_type).astype(np.int32)
molecule_representation = np.concatenate(molecule_representation)
# convert gaussrank test predictions to their actual values
if gaussrank:
print('compute the reverse frame')
reverse_frame = get_reverse_frame(test_id, predict, test_coupling_type, test_coupling_value, grm)
predict = reverse_frame['scalar_coupling_constant'].values
else:
print('build preds frame')
reverse_frame = pd.DataFrame(predict)
reverse_frame['type'] = test_coupling_type
reverse_frame.columns = ['scalar_coupling_constant', 'type_ind']
reverse_frame['id'] = test_id
reverse_frame['true_scalar_coupling_constant'] = test_coupling_value
mae, log_mae = compute_kaggle_metric(reverse_frame.scalar_coupling_constant, reverse_frame.true_scalar_coupling_constant, reverse_frame.type_ind)
print('Compute lmae per type')
num_target = NUM_COUPLING_TYPE
for t in range(NUM_COUPLING_TYPE):
if mae[t] is None:
mae[t] = 0
log_mae[t] = 0
num_target -= 1
mae_mean, log_mae_mean = sum(mae)/NUM_COUPLING_TYPE, sum(log_mae)/NUM_COUPLING_TYPE
loss_ = log_mae + [ test_loss, mae_mean, log_mae_mean ]
return loss_, reverse_frame, contributions, molecule_representation | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/data_collate.py | import torch
import torch.nn.functional as F
from mpnn_model.common import *
from mpnn_model.common_constants import *
from mpnn_model.data import *
import copy
DATA_DIR = '/rapids/notebooks/srabhi/champs-2019/input'
__all__ = ['tensor_collate_rnn', 'tensor_collate_baseline']
def tensor_collate_rnn(batch, batch_size, COUPLING_MAX, mode='train'):
"""
Function to apply dynamic padding of each batch in order to prepare inputs of the graph neural network
node, edge_feats, edge_index, node_index, coupling_index), targets
Returns:
X:
node [batch_size*molecules_num_nodes, node_dim] : nodes states, variable size per batch
edge_feats [batch_size*molecules_num_edges, edge_dim] : edges states, variable size per batch
edge_index [batch_size*molecules_num_edges, 2] : Index edges of the same molecule
node_index [batch_size*molecules_num_nodes, 1] : Index nodes of the same molecule
coupling_index
Y:
targets [N_coupling, 4]: tuple of four targets (scalar_coupling, coupling_gaussrank, coupling_contribs, coupling_type)
Test info:
        infor: the ids of the coupling observations, needed to build submission files  # N_coupling,
"""
batch_node, batch_edge, batch_coupling, batch_num_node, batch_num_edge, batch_num_coupling = batch
batch_node = batch_node.reshape(-1, NODE_MAX, 7).float()
batch_edge = batch_edge.reshape(-1, EDGE_MAX, 5)
batch_coupling = batch_coupling.reshape(-1, COUPLING_MAX, 21)
batch_size = batch_node.shape[0]
#### Create nodes / edges / coupling masks : optimized V1 : TO be optimized
mask = torch.cat([F.pad(torch.ones(i, device='cuda'), (0,NODE_MAX-i)).unsqueeze(0) for i in batch_num_node], dim=0)
mask_coupling = torch.cat([F.pad(torch.ones(i, device='cuda'), (0,COUPLING_MAX-i)).unsqueeze(0) for i in batch_num_coupling], dim=0)
mask_edge = torch.cat([F.pad(torch.ones(i, device='cuda'), (0,EDGE_MAX-i)).unsqueeze(0) for i in batch_num_edge], dim=0)
#### Build the output X:
# Get effective nodes / edges / coupling values : without padding
node = batch_node[mask.bool()].view(-1, 7)
edge = batch_edge[mask_edge.bool()].view(-1, 5)
coupling = batch_coupling[mask_coupling.bool()].view(-1, 21)
# node indices track nodes of the same molecule
node_index = mask.nonzero()[:, 0]
# Get edges feats and indices
edge_feats = edge[:, 2:]
edge_index = edge[:, :2].long()
# Get coupling path index
coupling_index = coupling[:, 10:14].long()
num_coupling = coupling_index.shape[0]
#get sequence of coupling type
pad_vector = torch.zeros(num_coupling, device='cuda').long()-1
coupling_type_sequence = torch.cat([pad_vector.view(-1,1), coupling[:, 14:17].long()], 1)
# batch_coupling_index : track coupling values of the same molecule
batch_coupling_index = mask_coupling.nonzero()[:, 0]
# offset edge and coupling indices w.r.t to N of nodes in each molecule
offset = torch.cat([torch.zeros(1, device='cuda').long(), batch_num_node[:-1]]).cumsum(0)
#edge
expanded_offset = torch.cat([torch.zeros(num_edges, device='cuda')+offset[mol_index] for mol_index,num_edges in
enumerate(batch_num_edge)], 0)
edge_index = torch.add(edge_index, expanded_offset.unsqueeze(1).long())
#coupling
expanded_offset=torch.cat([torch.zeros(n_coupling, device='cuda')+offset[mol_index] for mol_index,n_coupling in
enumerate(batch_num_coupling)], 0)
coupling_index = torch.add(coupling_index, expanded_offset.unsqueeze(1).long())
# type_id
coupling_type = coupling[:, 2].long()
# new coupling index: atom_0, atom_1, atom_2, atom_3, coupling_type, batch_index
coupling_index = torch.cat([coupling_index, coupling_type.view(-1,1) , batch_coupling_index.view(-1, 1)], -1)
# get sequence of atomic number
coupling_atomic = coupling[:, 17:].long()
#### Get Targets
    # 4 coupling contributions
coupling_contribution = coupling[:, 5:9]
#coupling value
coupling_value = coupling[:, 3]
#gauss_rank
gaussrank = coupling[:, 4]
targets = [coupling_value.float(), gaussrank.float(), coupling_contribution.float(), coupling_type.long()]
#### ids for inference time
infor = coupling[ : , 9]
# mode flag to return additional information for test data
if mode == 'test':
return (node, edge_feats, edge_index, node_index, coupling_index, coupling_type_sequence, coupling_atomic), targets, infor
return (node, edge_feats, edge_index, node_index, coupling_index, coupling_type_sequence, coupling_atomic), targets
def tensor_collate_baseline(batch, batch_size, COUPLING_MAX, mode='train'):
"""
Function to apply dynamic padding of each batch in order to prepare inputs of the graph neural network
node, edge_feats, edge_index, node_index, coupling_index), targets
Returns:
X:
node [batch_size*molecules_num_nodes, node_dim] : nodes states, variable size per batch
edge_feats [batch_size*molecules_num_edges, edge_dim] : edges states, variable size per batch
edge_index [batch_size*molecules_num_edges, 2] : Index edges of the same molecule
node_index [batch_size*molecules_num_nodes, 1] : Index nodes of the same molecule
coupling_index
Y:
targets [N_coupling, 4]: tuple of four targets (scalar_coupling, coupling_gaussrank, coupling_contribs, coupling_type)
Test info:
        infor: the ids of the coupling observations, needed to build submission files  # N_coupling,
"""
batch_node, batch_edge, batch_coupling, batch_num_node, batch_num_edge, batch_num_coupling = batch
batch_node = batch_node.reshape(-1, NODE_MAX, 7).float()
batch_edge = batch_edge.reshape(-1, EDGE_MAX, 5)
batch_coupling = batch_coupling.reshape(-1, COUPLING_MAX, 10)
batch_size = batch_node.shape[0]
#### Create nodes / edges / coupling masks : optimized V1 : TO be optimized
mask = torch.cat([F.pad(torch.ones(i, device='cuda'), (0,NODE_MAX-i)).unsqueeze(0) for i in batch_num_node], dim=0)
mask_coupling = torch.cat([F.pad(torch.ones(i, device='cuda'), (0,COUPLING_MAX-i)).unsqueeze(0) for i in batch_num_coupling], dim=0)
mask_edge = torch.cat([F.pad(torch.ones(i, device='cuda'), (0,EDGE_MAX-i)).unsqueeze(0) for i in batch_num_edge], dim=0)
#### Build the output X:
# Get effective nodes / edges / coupling values : without padding
node = batch_node[mask.bool()].view(-1, 7)
edge = batch_edge[mask_edge.bool()].view(-1, 5)
coupling = batch_coupling[mask_coupling.bool()].view(-1, 10)
# node indices track nodes of the same molecule
node_index = mask.nonzero()[:, 0]
# Get edges feats and indices
edge_feats = edge[:, 2:]
edge_index = edge[:, :2].long()
# Get coupling index
coupling_index = coupling[:, :2].long()
# batch_coupling_index : track coupling values of the same molecule
batch_coupling_index = mask_coupling.nonzero()[:, 0]
# offset edge and coupling indices w.r.t to N of nodes in each molecule
offset = torch.cat([torch.zeros(1, device='cuda').long(), batch_num_node[:-1]]).cumsum(0)
#edge
expanded_offset = torch.cat([torch.zeros(num_edges, device='cuda')+offset[mol_index] for mol_index,num_edges in
enumerate(batch_num_edge)], 0)
edge_index = torch.add(edge_index, expanded_offset.unsqueeze(1).long())
#coupling
expanded_offset=torch.cat([torch.zeros(n_coupling, device='cuda')+offset[mol_index] for mol_index,n_coupling in
enumerate(batch_num_coupling)], 0)
coupling_index = torch.add(coupling_index, expanded_offset.unsqueeze(1).long())
# type_id
coupling_type = coupling[:, 2].long()
# new coupling index: atom_0, atom_1, coupling_type, batch_index
coupling_index = torch.cat([coupling_index, coupling_type.view(-1,1) , batch_coupling_index.view(-1, 1)], -1)
#### Get Targets
    # 4 coupling contributions
coupling_contribution = coupling[:, 5:9]
#coupling value
coupling_value = coupling[:, 3]
#gauss_rank
gaussrank = coupling[:, 4]
targets = [coupling_value.float(), gaussrank.float(), coupling_contribution.float(), coupling_type.long()]
#### ids for inference time
infor = coupling[ : , 9]
# We don't use sequence data of the shortest path
coupling_type_sequence, coupling_atomic = [], []
# mode flag to return additional information for test data
if mode == 'test':
return (node, edge_feats, edge_index, node_index, coupling_index, coupling_type_sequence, coupling_atomic), targets, infor
return (node, edge_feats, edge_index, node_index, coupling_index, coupling_type_sequence, coupling_atomic), targets
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/callback.py | import numpy as np
import pandas as pd
from fastai.callbacks import SaveModelCallback
from fastai.callbacks import Callback
from fastai.torch_core import add_metrics
import torch
from torch import nn
import torch.nn.functional as F
import pdb
from mpnn_model.common_constants import NUM_COUPLING_TYPE, COUPLING_TYPE_MEAN, COUPLING_TYPE_STD, REVERSE_COUPLING_TYPE
__all__ = ['get_reverse_frame', 'lmae', 'compute_kaggle_metric', 'LMAE']
# reverse the gaussrank predictions to the actual distribution
def get_reverse_frame(test_id, predictions, coupling_type, target, grm,):
preds = pd.DataFrame(predictions)
preds['type_ind'] = coupling_type
preds.columns = ['prediction', 'type_ind']
preds['type'] = preds['type_ind'].map(REVERSE_COUPLING_TYPE)
preds['id'] = test_id
preds['true_scalar_coupling_constant'] = target
preds['scalar_coupling_constant'] = grm.convert_df(preds, from_coupling=False)
return preds
# Compute lmae of scalar coupling with respect to the type
def lmae(truth,pred,types):
    # Log of the Mean Absolute Error,
    # computed per coupling type and then averaged in log space
df = pd.DataFrame({'truth':truth,'pred':pred,'types':types})
df['err'] = np.abs(df['truth']-df['pred'])
x = df.groupby('types')['err'].mean().values
x = np.log(1e-8+x)
return np.mean(x)
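#--------------------------------------------------------------------------------------------
# Worked example (synthetic values): the absolute error is averaged per coupling type and the
# average is taken in log space, so a poor type cannot be hidden behind an easy one.
def _demo_lmae():
    truth = np.array([94.0, 95.0, -10.0, -11.0])
    pred = np.array([93.0, 96.0, -10.5, -10.5])
    types = np.array(['1JHC', '1JHC', '2JHH', '2JHH'])
    return lmae(truth, pred, types)  # mean of log(1.0) and log(0.5) ≈ -0.35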
# lmae w.r.t 8 coupling types : kaggle metric
def compute_kaggle_metric(predict, coupling_value, coupling_type):
"""
predict lmae loss w.r.t the coupling type
Arguments:
- predict: type(Array) array of scalar coupling predictions returned by the model
- coupling_value: type(Array ) True coupling values
- coupling_type: type(Array) True coupling type
Returns:
- mae, log_mae : the mean and log mean absolute error between predictions and true labels
"""
mae = [None]*NUM_COUPLING_TYPE
log_mae = [None]*NUM_COUPLING_TYPE
diff = np.fabs(predict-coupling_value)
for t in range(NUM_COUPLING_TYPE):
index = np.where(coupling_type==t)[0]
if len(index)>0:
m = diff[index].mean()
log_m = np.log(m+1e-8)
mae[t] = m
log_mae[t] = log_m
else:
pass
return mae, log_mae
# Callback to calculate LMAE at the end of each epoch
class LMAE(Callback):
'''
    Compute LMAE for the prediction of the coupling value
'''
_order = -20 #Needs to run before the recorder
def __init__(self, learn,grm, predict_type=False, normalize_coupling=False, coupling_rank=True, **kwargs):
self.learn = learn
self.predict_type = predict_type
self.normalize_coupling = normalize_coupling
self.grm = grm
self.coupling_rank = coupling_rank
def on_train_begin(self, **kwargs): self.learn.recorder.add_metric_names(['LMAE'])
def on_epoch_begin(self, **kwargs): self.output, self.target, self.types = [], [], []
def on_batch_end(self, last_target, last_output, train, **kwargs):
if not train:
self.target.append(last_target[0])
self.types.append(last_target[3])
if self.predict_type:
coupling = torch.gather(last_output[0], 1, last_target[3].unsqueeze(1)).view(-1)
self.output.append(coupling)
else:
self.output.append(last_output[0])
def on_epoch_end(self, last_metrics, **kwargs):
if len(self.output) > 0:
output = torch.cat(self.output)
target = torch.cat(self.target)
types = torch.cat(self.types)
if self.normalize_coupling :
# Denormalize w.r.t to type
means = torch.gather(COUPLING_TYPE_MEAN, 0, types)
stds = torch.gather(COUPLING_TYPE_STD, 0, types)
output = (output * stds) + means
target = (target * stds) + means
metric = lmae(output.data.cpu().numpy(), target.data.cpu().numpy(), types.data.cpu().numpy())
elif self.coupling_rank:
# Reverse using grm mapping frames
preds = pd.DataFrame(output.data.cpu().numpy())
preds['type'] = types.data.cpu().numpy()
preds.columns = ['prediction', 'type']
preds['type'] = preds['type'].map(REVERSE_COUPLING_TYPE)
preds['true_scalar_coupling_constant'] = target.data.cpu().numpy()
preds['scalar_coupling_constant'] = self.grm.convert_df(preds, from_coupling=False)
# compute metric for reversed scalar coupling predictions
metric = lmae(preds['scalar_coupling_constant'], preds['true_scalar_coupling_constant'], preds['type'])
else:
preds = output.data.cpu().numpy().reshape(-1,)
type_ = types.data.cpu().numpy()
targets = target.data.cpu().numpy()
metric = lmae(targets, preds, type_)
return add_metrics(last_metrics, [metric])
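#--------------------------------------------------------------------------------------------
# Wiring sketch (assumed names: `learn` is a fastai Learner built around the MPNN Net, `grm`
# a fitted GaussRankMap): attach the callback so validation LMAE on the original coupling
# scale is reported at the end of every epoch.
def _demo_attach_lmae(learn, grm):
    learn.callbacks.append(LMAE(learn, grm, predict_type=False,
                                normalize_coupling=False, coupling_rank=True))
    return learn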
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/model.py | from mpnn_model.common import *
from torch_scatter import *
from torch_geometric.utils import scatter_
import torch
import torch.nn as nn
import numbers
from mpnn_model.common_model import *
from mpnn_model.regression_head import *
from mpnn_model.message_passing import *
from mpnn_model.RNN_attention import *
__all__ = ['Net' ]
#############################################################################################################
# #
# END-to-END model #
# #
#############################################################################################################
class Net(torch.nn.Module):
def __init__(self,
ConfigParams,
y_range
):
"""
Arguments:
mpnn: Dictionary with all the needed arguments for GraphConv and Set2Set modules.
regression: Dictionary with all the needed arguments for regression output module.
batch_size:
num_target:
predict_type:
"""
super(Net, self).__init__()
self.encoding = ConfigParams['model']['mpnn']['node_encoder']['encoding']
self.num_output = ConfigParams['model']['regression']['num_output']
self.predict_type = ConfigParams['model']['regression']['predict_type']
self.y_range = y_range
self.node_dim = ConfigParams['model']['mpnn']['edge_encoder']['node_dim']
self.num_target = ConfigParams['model']['regression']['num_target']
self.num_type = ConfigParams['model']['num_type']
self.RNN = ConfigParams['model']['RNN']
self.device = ConfigParams['train']['device']
###################------------- MPNN representation ---------------####################
self.num_propagate = ConfigParams['model']['mpnn']['T_steps']
# Process the nodes features
self.preprocess = get_node_encoder(**ConfigParams['model']['mpnn']['node_encoder'])
# Message
self.message_function = MessagePassing(ConfigParams)
#Update
self.update_function = GRUUpdate(ConfigParams)
#readout
self.readout = Set2Set(**ConfigParams['model']['mpnn']['Set2Set'])
###################------------- RNN representation ---------------####################
if self.RNN:
self.rnn_attention = BI_RNN_Nodes(**ConfigParams['model']['node_seq'])
self.default_node_vector = torch.distributions.uniform.Uniform(-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)).sample_n(self.node_dim)
if self.device == 'cuda':
self.default_node_vector = self.default_node_vector.cuda()
###################---------------- Build predictions ------------------######################
# embed type with one single output for all the types:
if self.num_target == 1 and not self.predict_type:
self.type_embedding = nn.Embedding(16, 32, padding_idx=0)
self.dense_layer, self.classify, self.predict, self.predicition_layers = get_regression_module(**ConfigParams['model']['regression'])
def forward(self,
node,
edge,
edge_index,
node_index,
coupling_index,
bond_type,
x_atomic):
num_node, node_dim = node.shape
num_edge, edge_dim = edge.shape
#--- Build the graph representation using MPNN
# Process nodes representation
if self.encoding == 'one_hot':
node = self.preprocess(node)
elif self.encoding == 'label':
node_cat, node_cont = node[:,:6].long(), node[:,-1].view(-1,1).float()
node = self.preprocess(node_cat, node_cont)
# T-steps of message updates
for i in range(self.num_propagate):
# node <- h_v^t
            messages = self.message_function(node, edge_index, edge, reuse_graph_tensors=(i != 0)) # m_v^{t+1} = sum_w A(e_vw) * h_w^t
node = self.update_function(messages, node) # h_v^t+1 = GRU(m_v^t+1, h_v^t)
# K-steps of readout function
pool = self.readout(node, node_index)
if self.RNN:
#--- Get indices of the atoms in the coupling shortest path
num_coupling = len(coupling_index)
coupling_atom0_index, coupling_atom1_index, coupling_atom2_index, coupling_atom3_index, coupling_type_index, coupling_batch_index = \
torch.split(coupling_index,1,dim=1)
            # Concatenate the graph representation vector 'pool',
pool = torch.index_select( pool, dim=0, index=coupling_batch_index.view(-1))
#pad random unseen node vector to node matrix
node = torch.cat([self.default_node_vector.view(1, -1), node], dim=0)
# build node's embedding sequence
node0 = torch.index_select( node, dim=0, index=coupling_atom0_index.view(-1)+1).unsqueeze(1)
node1 = torch.index_select( node, dim=0, index=coupling_atom1_index.view(-1)+1).unsqueeze(1)
node2 = torch.index_select( node, dim=0, index=coupling_atom2_index.view(-1)+1).unsqueeze(1)
node3 = torch.index_select( node, dim=0, index=coupling_atom3_index.view(-1)+1).unsqueeze(1)
node_seq = torch.cat([node0, node1, node2, node3], dim=1) # bs x 4 x node_dim
# Get attention hidden states
attention_node_seq = self.rnn_attention(node_seq, bond_type.view(-1, 4, 1), x_atomic.view(-1, 4, 1))
# embed type
if not self.predict_type and self.num_type != 1:
coupling_type_embeds = self.type_embedding(coupling_type_index)
input_regression = torch.cat([pool, attention_node_seq, coupling_type_embeds.view(-1, 32)],-1)
else:
input_regression = torch.cat([pool, attention_node_seq],-1)
else:
#--- Get indices of the coupling atoms
num_coupling = len(coupling_index)
coupling_atom0_index, coupling_atom1_index, coupling_type_index, coupling_batch_index = \
torch.split(coupling_index,1,dim=1)
            # Concatenate the graph representation vector 'pool' with the representation vectors
            # of the nodes coupling_atom0 and coupling_atom1
pool = torch.index_select( pool, dim=0, index=coupling_batch_index.view(-1))
node0 = torch.index_select( node, dim=0, index=coupling_atom0_index.view(-1))
node1 = torch.index_select( node, dim=0, index=coupling_atom1_index.view(-1))
input_regression = torch.cat([pool,node0,node1],-1)
self.pool = pool
dense_representation = self.dense_layer(input_regression)
#---Get the outputs : coupling_preds, contribution_preds, type_classes :
#w.r.t the two flags : num_output (1: scalar vs 5: scalar+contribution) & predict_type: False (use the actual type) Vs True (predict the type)
predict_type = []
contribution_preds = []
#--- Get the regression predictions w.r.t the coupling type
if self.num_output ==1:
predict = self.predict(dense_representation)
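            # map the raw prediction into [y_range[0], y_range[1]] with a scaled sigmoid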
coupling_preds = (self.y_range[1]-self.y_range[0]) * torch.sigmoid(predict) + self.y_range[0]
            # when the model predicts one value per target type (8): additionally decide whether to
            # jointly predict the type or to use the given type to select the prediction
if self.num_target != 1:
if self.predict_type:
predict_type = self.classify(dense_representation)
else:
coupling_preds = torch.gather(predict, 1, coupling_type_index).view(-1)
coupling_preds = (self.y_range[1]-self.y_range[0]) * torch.sigmoid(coupling_preds) + self.y_range[0]
elif self.num_output==5:
            # get a 5-dim prediction vector for each type: only works when num_target = 8,
            # not implemented for single-type models
            if self.num_target == 1:
                raise LookupError('Predicting coupling contributions is only implemented for multi-type models')
preds = [self.predicition_layers[i](dense_representation).view(-1, 1, 5) for i in range(8)]
predict = torch.cat(preds, dim=1)
predict = torch.gather(predict, 1, coupling_type_index.view(-1, 1, 1).expand(predict.size(0), 1,
predict.size(2))).squeeze()
contribution_preds = predict[:,1:].view(-1, 4)
coupling_preds = predict[:,0].view(-1)
coupling_preds = (self.y_range[1]-self.y_range[0]) * torch.sigmoid(coupling_preds) + self.y_range[0]
return [coupling_preds, contribution_preds, predict_type]
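
# Minimal usage sketch (hedged): the exact ConfigParams layout is defined by the experiment
# YAML files; the call below only mirrors the keys read in __init__ and the tensors produced
# by the data loader, and is an assumption rather than the authoritative training entry point.
#
#   cfg = load_cfg('experiments/mpnn_baseline.yaml')   # hypothetical config path
#   net = Net(cfg, y_range=(y_min, y_max)).to(cfg['train']['device'])
#   coupling_preds, contribution_preds, type_logits = net(
#       node, edge, edge_index, node_index, coupling_index, bond_type, x_atomic)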
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/RNN_attention.py | from mpnn_model.common import *
import torch
import torch.nn as nn
#############################################################################################################
# #
# Nodes sequence : Attention-Bidirectional #
# #
#############################################################################################################
class BI_RNN_Nodes(torch.nn.Module):
def attention_neuralnet(self, rnn_out, state):
"""
#### credit to : https://github.com/wabyking/TextClassificationBenchmark
"""
merged_state = torch.cat([s for s in state],1) # merge the hidden states of the two directions
merged_state = merged_state.squeeze(0).unsqueeze(2)
# (batch, seq_len, cell_size) * (batch, cell_size, 1) = (batch, seq_len, 1)
weights = torch.bmm(rnn_out, merged_state)
        weights = torch.nn.functional.softmax(weights.squeeze(2), dim=1).unsqueeze(2)
# (batch, cell_size, seq_len) * (batch, seq_len, 1) = (batch, cell_size, 1)
return torch.bmm(torch.transpose(rnn_out, 1, 2), weights).squeeze(2), weights
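
    # Shape walk-through (illustrative, assuming a single-layer bidirectional LSTM with
    # hidden_size H and batch size B): rnn_out is (B, 4, 2H) and the merged final state is
    # (B, 2H, 1); the first bmm yields per-step scores (B, 4, 1), softmax turns them into
    # attention weights, and the second bmm returns a (B, 2H) context vector over the path.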
def __init__(self,
node_dim,
hidden_size,
num_layers,
dropout,
batch_first,
bidirectional,
rnn_model='LSTM',
attention=True):
super(BI_RNN_Nodes, self).__init__()
self.type_encoder = nn.Embedding(16, 32, padding_idx=0)
self.atomic_encoder = nn.Embedding(16, 32, padding_idx=0)
self.attention = attention
if rnn_model == 'LSTM':
self.rnn = nn.LSTM(input_size= node_dim + 64, hidden_size=hidden_size,
num_layers=num_layers, dropout=dropout,
batch_first=batch_first, bidirectional=bidirectional)
else:
raise LookupError('only support LSTM ')
def forward(self, x_nodes, x_coupling_type, x_atomic):
'''
x_nodes [batch_size x 4 x node_dim] : sequence of nodes embeddings of the coupling's shortest path
x_coupling_type [batch_size x 4 x 1]: sequence of in-coming bond type
X_atomic [batch_size x 4 x 1]: sequence of node's atomic number
'''
x_type = self.type_encoder(x_coupling_type+1).squeeze() # +1 to encode padded/missing values to 0
x_atomic = self.atomic_encoder(x_atomic+1).squeeze()
x = torch.cat([x_nodes, x_type, x_atomic], dim=2)
rnn_out, (final_hidden_state, final_cell_state) = self.rnn(x, None)
if self.attention:
last_tensor_item, weights = self.attention_neuralnet(rnn_out, final_hidden_state)
else:
            # use the mean of the RNN hidden states over the sequence instead of weighted attention
            last_tensor_item = torch.mean(rnn_out, dim=1)
return last_tensor_item
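
# Minimal usage sketch (hedged): the shapes follow the forward() docstring; node_dim and
# hidden_size below are illustrative values, not the project defaults.
#
#   rnn = BI_RNN_Nodes(node_dim=128, hidden_size=256, num_layers=1, dropout=0.0,
#                      batch_first=True, bidirectional=True)
#   out = rnn(torch.randn(32, 4, 128),                   # path node embeddings
#             torch.zeros(32, 4, 1, dtype=torch.long),   # in-coming bond types
#             torch.zeros(32, 4, 1, dtype=torch.long))   # atomic numbers
#   # out: (32, 512) attention-pooled representation of the coupling shortest path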
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/radam.py | import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
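                    # N_sma approximates the length of the SMA of the adaptive learning rate;
                    # when it is large enough (here, at least 5) the variance of the adaptive
                    # term is considered tractable and the rectified step below is applied,
                    # otherwise the update falls back to an unadapted momentum-style step.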
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
return loss
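
# Usage sketch (hedged): a standard PyTorch training step; the model, criterion, loader and
# hyper-parameters below are illustrative placeholders, not the values used in the experiments.
#
#   optimizer = RAdam(net.parameters(), lr=1e-3, weight_decay=1e-4)
#   for x, y in loader:
#       optimizer.zero_grad()
#       loss = criterion(net(x), y)
#       loss.backward()
#       optimizer.step()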
class PlainRAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(PlainRAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(PlainRAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
return loss
class AdamW(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup = 0):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, warmup = warmup)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if group['warmup'] > state['step']:
scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']
else:
scheduled_lr = group['lr']
                step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
p.data.copy_(p_data_fp32)
return loss | 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/helpers.py | import yaml
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
#############################################################################################################
# #
# Load experiment configuration #
# #
#############################################################################################################
def load_cfg(yaml_filepath):
"""
Load a YAML configuration file.
Parameters
----------
yaml_filepath : str
Returns
-------
cfg : dict
"""
# Read YAML experiment definition file
with open(yaml_filepath, 'r') as stream:
        cfg = yaml.safe_load(stream)
return cfg
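
# Example (hedged): the YAML path below is hypothetical.
#   cfg = load_cfg('experiments/mpnn_baseline.yaml')
#   num_output = cfg['model']['regression']['num_output']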
| 0 |
rapidsai_public_repos/deeplearning/champs-scalar-coupling | rapidsai_public_repos/deeplearning/champs-scalar-coupling/mpnn_model/regression_head.py | from mpnn_model.common import *
from mpnn_model.common_model import *
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['get_regression_module']
#############################################################################################################
# #
# Output models #
# #
#############################################################################################################
def get_regression_module(num_output=1,
input_dim=128,
shared_layers=[1024, 512],
activation='relu',
dropout= 0.,
branch_layers=[],
num_target=8,
predict_type =False):
'''
Regression module
Args:
num_output: [1, 5]: Whether to predict only scalar coupling or scalar coupling + the 4 contributions
input_dim: the dimension of regression's head input:
            Combination of the graph representation, the nodes' representation of the coupling edge and the nodes sequence hidden states.
shared_layers: the dimension of the fully connected network shared between all the possible model's outputs
activation:
        dropout: dropout probability used to regularize the regression head
branch_layers: the fully connected branch network to predict each contribution value
        num_target: Whether to predict all the coupling types or fine-tune a single model per type
predict_type: For num_output =1, whether to jointly predict the bond type or to embed the categorical variable "bond type"
Outputs: 4 branches
        dense_layer: shared branch that learns a dense representation from the concatenation of the
            graph representation and the representation vectors of the coupling edge's nodes.
classify: if predict_type==True, Classification branch that computes the logits of the 8 classes of coupling type
predict: if num_output==1, Regression branch that computes scalar coupling constant vector: 8 values (per type)
        predicition_layers: if num_output==5, 8 regression branches (one for each coupling type) that compute
            the scalar coupling constant and the contribution components.
'''
predicition_layers = []
classify =[]
predict = []
dense_layer = LinearBn(input_dim, shared_layers[0], act=activation)
### Whether to predict only scalar coupling or scalar coupling + the 4 contributions
if num_output==1:
predict = nn.Sequential(
MlpBn(shared_layers[0], dimensions=shared_layers[1:], activation=activation, dropout=dropout),
nn.Linear(shared_layers[-1], num_target)
)
### Whether to jointly predict the bond type or to embed the categorical variable "bond type"
if predict_type:
classify = nn.Sequential(
                LinearBn(shared_layers[0], 512),
nn.ReLU(inplace=True),
nn.Linear(512, num_target),)
elif num_output == 5:
        # build one independent branch per coupling type so that the branches do not share parameters
        predicition_layers = nn.ModuleList([
            nn.Sequential(
                MlpBn(shared_layers[0],
                      dimensions=branch_layers,
                      activation=activation,
                      dropout=dropout),
                nn.Linear(branch_layers[-1], num_output))
            for i in range(num_target)])
return dense_layer, classify, predict, predicition_layers
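
# Usage sketch (hedged): argument values below are illustrative; in the project they come from
# ConfigParams['model']['regression'].
#
#   dense_layer, classify, predict, predicition_layers = get_regression_module(
#       num_output=1, input_dim=128, shared_layers=[1024, 512],
#       activation='relu', dropout=0.0, num_target=8, predict_type=False)
#   per_type_preds = predict(dense_layer(input_regression))   # (batch, num_target), pre-sigmoid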
| 0 |