Columns:
  prompt: string, lengths 94 to 42.6k
  completion: string, lengths 6 to 120
  api: string, lengths 14 to 68
prompt:
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp

import numpy as np
import pytest

import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform


def test_trace():
    for symbolic in [False, True]:

        @trace(symbolic=symbolic)
        def f(x):
            return -x

        x = tensor([1])
        y = f(x).numpy()

        for i in range(3):
            np.testing.assert_equal(f(x).numpy(), y)


def test_output_copy_trace():
    class Simple(Module):
        def __init__(self):
            super().__init__()
            self.a = Parameter([1.0], dtype=np.float32)

        def forward(self, x):
            x = x * self.a
            # will result into a copy of output in grad
            x = F.exp(x)
            return x

    net = Simple()

    gm = GradManager().attach(net.parameters())
    opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
    data = tensor(np.arange(4).reshape(2, 2), dtype="float32")

    @trace(symbolic=False)
    def train_f1(d):
        with gm:
            loss = net(d)
            gm.backward(loss)
            opt.step().clear_grad()
        return loss

    @trace(symbolic=True)
    def train_f2(d):
        with gm:
            loss = net(d)
            gm.backward(loss)
            opt.step().clear_grad()
        return loss

    for i in range(2):
        y1 = train_f1(data).numpy()
        y2 = train_f2(data).numpy()
        np.testing.assert_equal(y1, y2)


def test_exclude_from_trace():
    for symbolic in [False, True]:

        @trace(symbolic=symbolic)
        def f(x):
            x = -x
            with exclude_from_trace():
                if i % 2:
                    x = -x
            x = -x
            return x

        x = tensor([1])

        for i in range(3):
            y = f(x).numpy()
            np.testing.assert_equal(f(x).numpy(), y)


def test_print_in_trace():
    for symbolic in [False]:  # cannot read value in symbolic mode

        @trace(symbolic=symbolic)
        def f(x):
            nonlocal buf
            x = -x
            buf = x.numpy()
            x = -x
            return x

        buf = None
        x = tensor([1])

        for i in range(3):
            y = f(x).numpy()
            z = buf
            buf = None
            np.testing.assert_equal(f(x).numpy(), y)
            np.testing.assert_equal(z, buf)


def test_dump():
    @trace(symbolic=True, capture_as_const=True)
    def f(a, b):
        return a + b

    a = tensor([2])
    b = tensor([4])
    y = f(a, b).numpy()

    for i in range(3):
        np.testing.assert_equal(f(a, b).numpy(), y)

    file = io.BytesIO()
    dump_info = f.dump(file)
    assert dump_info.nr_opr == 3
    np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
    np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
    file.seek(0)
    result = cgtools.load_and_inference(file, [a, b])
    np.testing.assert_equal(result[0], y)


def test_capture_dump():
    a = tensor([2])

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return x * a

    x = tensor([3])
    y = f(x).numpy()

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), y)

    file = io.BytesIO()
    f.dump(file)
    file.seek(0)
    result = cgtools.load_and_inference(file, [x])
    np.testing.assert_equal(result[0], y)


def test_dump_volatile():
    p = tensor([2])

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return x * p

    x = tensor([3])
    y = f(x).numpy()

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), y)

    file = io.BytesIO()
    f.dump(file, optimize_for_inference=False)
    file.seek(0)
    cg, _, outputs = G.load_graph(file)
    (out,) = outputs
    assert (
        cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
        == "ImmutableTensor"
    )


def test_trace_profiler():
    for symbolic in [False, True]:

        @trace(symbolic=symbolic, profiling=True)
        def f(x):
            return -x

        x = tensor([1])
        y = f(x).numpy()

        f(x)
        f(x)  # XXX: has to run twice

        out = f.get_profile()
        assert out.get("profiler")


@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        # directly return x / x will not trigger gopt
        # since there's no way to tell the two x are the same
        y = 2.0 * x
        return y / y

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        y = 2.0 * x
        return y / y

    d = tensor(0.0)
    assert not np.isfinite(f(d).numpy())
    np.testing.assert_equal(g(d).numpy().item(), 1.0)


@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x, y):
        return log(exp(x) + exp(y))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x, y):
        return log(exp(x) + exp(y))

    val = 1.0e4
    d = tensor(val)
    o = tensor(0.0)
    assert not np.isfinite(f(d, o).numpy())
    np.testing.assert_almost_equal(g(d, o), val)


@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        return log(exp(x))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        return log(exp(x))

    f(tensor(1.0))
    _, out = mkstemp()
    f.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_1 = cgtools.get_oprs_seq(outputs)

    g(tensor(1.0))
    g.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_2 = cgtools.get_oprs_seq(outputs)

    assert len(oprs_1) - len(oprs_2) == 2


def test_optimize_for_inference():
    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return exp(x)

    _, out = mkstemp()
    f(tensor(5.0))
    f.dump(out, enable_io16xc32=True)

    res = G.load_graph(out)
    computing_input = res.output_vars_list[0].owner.inputs[0]
    assert computing_input.dtype == np.float16


def test_optimize_for_inference_broadcast():
    a = tensor(np.ones(1, dtype=np.float32))

    @trace(capture_as_const=True, symbolic_shape=True)
    def f():
        return a._broadcast(tensor([1, 10], dtype=np.int32))

    f()
    f.dump(io.BytesIO())


def test_trace_cvt_bool():
    x = tensor([0], dtype=np.int32)

    @trace(symbolic=True)
    def f(x):
        a = x.shape
        b = a[0]
        assert

completion:
isscalar(b)

api:
megengine.core.tensor.utils.isscalar
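For reference, a minimal runnable sketch of this row with its completion applied, assembled only from code already in the file above (the completed form also appears verbatim in the next row's prompt):

import numpy as np
from megengine import tensor
from megengine.core.tensor.utils import isscalar
from megengine.jit import trace

@trace(symbolic=True)
def f(x):
    a = x.shape
    b = a[0]              # a shape element under trace
    assert isscalar(b)    # the completion: isscalar(b)
    return b == 0

x = tensor([0], dtype=np.int32)
np.testing.assert_equal(f(x).numpy(), False)  # shape[0] is 1, not 0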
prompt (continues the previous prompt, with its completion filled in):
        assert isscalar(b)
        return b == 0

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), False)


def test_trace_reshape():
    for symbolic in [False, True]:
        x1 = tensor(np.random.randn(2, 10, 10))
        x2 = tensor(np.random.randn(4, 10, 10))
        x3 = tensor(np.random.randn(8, 10, 10))

        @

completion:
trace(symbolic=symbolic, capture_as_const=True)

api:
megengine.jit.trace
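The completed decorator in context: a sketch of test_trace_reshape exactly as the later rows show it, with no added API assumptions; the traced function is exercised with three batch sizes:

import numpy as np
from megengine import tensor
from megengine.jit import trace

for symbolic in [False, True]:

    @trace(symbolic=symbolic, capture_as_const=True)  # the completion
    def f(x):
        # reshape keyed on a shape element of the input
        return x.reshape(x.shape[0], 100)

    f(tensor(np.random.randn(2, 10, 10)))
    f(tensor(np.random.randn(4, 10, 10)))
    f(tensor(np.random.randn(8, 10, 10)))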
prompt (continues the previous prompt, with its completion filled in):
        @trace(symbolic=symbolic, capture_as_const=True)
        def f(x):
            y = x.reshape(x.shape[0], 100)
            return y

        f(x1)
        f(x2)
        f(x3)


def test_trace_topk():
    x = tensor([5, 2, 7, 1, 0, 3, 2])

    @trace(symbolic=True)
    def f(x):
        y =

completion:
F.topk(x, 3)

api:
megengine.functional.topk
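The completed top-k call in context, again taken from the later rows; the test checks the shape of the first element of the result:

import numpy as np
import megengine.functional as F
from megengine import tensor
from megengine.jit import trace

@trace(symbolic=True)
def f(x):
    y = F.topk(x, 3)  # the completion; y[0] is checked to have shape (3,)
    np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
    return y

x = tensor([5, 2, 7, 1, 0, 3, 2])
for i in range(3):
    f(x)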
prompt (continues the previous prompt, with its completion filled in):
        y = F.topk(x, 3)
        np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
        return y

    for i in range(3):
        f(x)


def test_trace_warp_perspective():
    inp_shape = (1, 1, 4, 4)
    x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
    M_shape = (1, 3, 3)
    M = tensor(
        np.array(
            [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
        ).reshape(M_shape)
    )

    @trace(symbolic=True)
    def f(x, M):
        out =

completion:
F.warp_perspective(x, M, (2, 2))

api:
megengine.functional.warp_perspective
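The completed warp call in context, as the later rows show it; the 3x3 matrix is an identity homography with a (1, 1) translation, so the 2x2 output samples a shifted window of the input:

import numpy as np
import megengine.functional as F
from megengine import tensor
from megengine.jit import trace

x = tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
M = tensor(
    np.array(
        [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
    ).reshape(1, 3, 3)
)

@trace(symbolic=True)
def f(x, M):
    out = F.warp_perspective(x, M, (2, 2))  # the completion
    np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
    return out

f(x, M)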
prompt (continues the previous prompt, with its completion filled in):
        out = F.warp_perspective(x, M, (2, 2))
        np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
        return out

    for i in range(1):
        f(x, M)


def test_raise_on_trace():
    step_count = 0
    catch_count = 0
    bad_step = 10

    class CatchMe(Exception):
        pass

    a = tensor([1, 2, 3, 4])
    b = tensor([5, 6, 7, 8])
    c = tensor([9, 0, 1, 2])

    @trace
    def add_abc(a, b, c):
        ps = a + b
        result = ps + c
        if step_count == bad_step:
            raise CatchMe("catch me")
        return result

    for i in range(100):
        try:
            d = add_abc(a, b, c)
        except CatchMe as e:
            catch_count += 1
        else:
            np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
        step_count += 1
    assert catch_count == 1


def test_trace_broadcast():
    for symbolic in [False, True]:
        x1 = tensor(np.random.randn(3, 1, 1))
        x2 = tensor(np.random.randn(1, 4, 1))
        x3 = tensor(np.random.randn(1, 1, 5))

        @

completion:
trace(symbolic=symbolic, capture_as_const=True)

api:
megengine.jit.trace
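The same decorator completion, this time wrapping test_trace_broadcast from the later rows; each input is broadcastable to (3, 4, 5):

import numpy as np
import megengine.functional as F
from megengine import tensor
from megengine.jit import trace

for symbolic in [False, True]:

    @trace(symbolic=symbolic, capture_as_const=True)  # the completion
    def f(x):
        return F.broadcast_to(x, (3, 4, 5))

    f(tensor(np.random.randn(3, 1, 1)))
    f(tensor(np.random.randn(1, 4, 1)))
    f(tensor(np.random.randn(1, 1, 5)))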
prompt (continues the previous prompt, with its completion filled in):
        @trace(symbolic=symbolic, capture_as_const=True)
        def f(x):
            y = F.broadcast_to(x, (3, 4, 5))
            return y

        f(x1)
        f(x2)
        f(x3)


def test_trace_nms():
    def make_inputs(n):
        boxes = np.zeros((n, 4))
        boxes[:, :2] = np.random.rand(n, 2) * 100
        boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
        scores = np.random.rand(n)
        return tensor(boxes), tensor(scores)

    @trace(symbolic=False)
    def f(boxes, scores):
        # with tracing, max_output must be specified
        results =

completion:
F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)

api:
megengine.functional.nn.nms
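The completed NMS call in context, assembled from the later rows; the test's own comments explain the contrast: under tracing max_output must be specified, while outside the trace it can be inferred:

import numpy as np
import megengine.functional as F
from megengine import tensor
from megengine.jit import exclude_from_trace, trace

def make_inputs(n):
    boxes = np.zeros((n, 4))
    boxes[:, :2] = np.random.rand(n, 2) * 100        # top-left corners
    boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100  # bottom-right, always larger
    return tensor(boxes), tensor(np.random.rand(n))

@trace(symbolic=False)
def f(boxes, scores):
    # with tracing, max_output must be specified
    results = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
    with exclude_from_trace():
        # without tracing, max_output can be inferred inside nms
        _ = F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
    return results

f(*make_inputs(10))
f(*make_inputs(20))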
prompt (continues the previous prompt, with its completion filled in):
        results = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
        # without tracing, max output can be inferred inside nms
        with exclude_from_trace():
            _ = F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
        return results

    f(*make_inputs(10))
    f(*make_inputs(20))
    f(*make_inputs(30))


def test_trace_valid_broadcast():
    x1 = tensor(np.random.randn(1, 1))
    x2 = tensor(np.random.randn(1, 2))
    shape = (

completion:
tensor([2])

api:
megengine.tensor
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import io from tempfile import mkstemp import numpy as np import pytest import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.optimizer as optim import megengine.utils.comp_graph_tools as cgtools from megengine import Parameter, tensor from megengine.autodiff import GradManager from megengine.core._trace_option import set_symbolic_shape from megengine.core.ops import builtin as ops from megengine.core.ops.builtin import Elemwise from megengine.core.tensor.utils import isscalar from megengine.functional import exp, log from megengine.jit import exclude_from_trace, trace from megengine.module import Module from megengine.random import normal, uniform def test_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): return -x x = tensor([1]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) def test_output_copy_trace(): class Simple(Module): def __init__(self): super().__init__() self.a = Parameter([1.0], dtype=np.float32) def forward(self, x): x = x * self.a # will result into a copy of output in grad x = F.exp(x) return x net = Simple() gm = GradManager().attach(net.parameters()) opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9) data = tensor(np.arange(4).reshape(2, 2), dtype="float32") @trace(symbolic=False) def train_f1(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss @trace(symbolic=True) def train_f2(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss for i in range(2): y1 = train_f1(data).numpy() y2 = train_f2(data).numpy() np.testing.assert_equal(y1, y2) def test_exclude_from_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): x = -x with exclude_from_trace(): if i % 2: x = -x x = -x return x x = tensor([1]) for i in range(3): y = f(x).numpy() np.testing.assert_equal(f(x).numpy(), y) def test_print_in_trace(): for symbolic in [False]: # cannot read value in symbolic mode @trace(symbolic=symbolic) def f(x): nonlocal buf x = -x buf = x.numpy() x = -x return x buf = None x = tensor([1]) for i in range(3): y = f(x).numpy() z = buf buf = None np.testing.assert_equal(f(x).numpy(), y) np.testing.assert_equal(z, buf) def test_dump(): @trace(symbolic=True, capture_as_const=True) def f(a, b): return a + b a = tensor([2]) b = tensor([4]) y = f(a, b).numpy() for i in range(3): np.testing.assert_equal(f(a, b).numpy(), y) file = io.BytesIO() dump_info = f.dump(file) assert dump_info.nr_opr == 3 np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"]) np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"]) file.seek(0) result = cgtools.load_and_inference(file, [a, b]) np.testing.assert_equal(result[0], y) def test_capture_dump(): a = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x * a x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file) file.seek(0) result = cgtools.load_and_inference(file, [x]) np.testing.assert_equal(result[0], y) def test_dump_volatile(): p = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x 
* p

    x = tensor([3])
    y = f(x).numpy()
    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), y)

    file = io.BytesIO()
    f.dump(file, optimize_for_inference=False)
    file.seek(0)
    cg, _, outputs = G.load_graph(file)
    (out,) = outputs
    assert (
        cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
        == "ImmutableTensor"
    )


def test_trace_profiler():
    for symbolic in [False, True]:

        @trace(symbolic=symbolic, profiling=True)
        def f(x):
            return -x

        x = tensor([1])
        y = f(x).numpy()

        f(x)
        f(x)  # XXX: has to run twice

        out = f.get_profile()
        assert out.get("profiler")


@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        # directly return x / x will not trigger gopt
        # since there's no way to tell the two x are the same
        y = 2.0 * x
        return y / y

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        y = 2.0 * x
        return y / y

    d = tensor(0.0)
    assert not np.isfinite(f(d).numpy())
    np.testing.assert_equal(g(d).numpy().item(), 1.0)


@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x, y):
        return log(exp(x) + exp(y))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x, y):
        return log(exp(x) + exp(y))

    val = 1.0e4
    d = tensor(val)
    o = tensor(0.0)
    assert not np.isfinite(f(d, o).numpy())
    np.testing.assert_almost_equal(g(d, o), val)


@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        return log(exp(x))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        return log(exp(x))

    f(tensor(1.0))
    _, out = mkstemp()
    f.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_1 = cgtools.get_oprs_seq(outputs)

    g(tensor(1.0))
    g.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_2 = cgtools.get_oprs_seq(outputs)

    assert len(oprs_1) - len(oprs_2) == 2


def test_optimize_for_inference():
    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return exp(x)

    _, out = mkstemp()
    f(tensor(5.0))
    f.dump(out, enable_io16xc32=True)

    res = G.load_graph(out)
    computing_input = res.output_vars_list[0].owner.inputs[0]
    assert computing_input.dtype == np.float16


def test_optimize_for_inference_broadcast():
    a = tensor(np.ones(1, dtype=np.float32))

    @trace(capture_as_const=True, symbolic_shape=True)
    def f():
        return a._broadcast(tensor([1, 10], dtype=np.int32))

    f()
    f.dump(io.BytesIO())


def test_trace_cvt_bool():
    x = tensor([0], dtype=np.int32)

    @trace(symbolic=True)
    def f(x):
        a = x.shape
        b = a[0]
        assert isscalar(b)
        return b == 0

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), False)


def test_trace_reshape():
    for symbolic in [False, True]:
        x1 = tensor(np.random.randn(2, 10, 10))
        x2 = tensor(np.random.randn(4, 10, 10))
        x3 = tensor(np.random.randn(8, 10, 10))

        @trace(symbolic=symbolic, capture_as_const=True)
        def f(x):
            y = x.reshape(x.shape[0], 100)
            return y

        f(x1)
        f(x2)
        f(x3)


def test_trace_topk():
    x = tensor([5, 2, 7, 1, 0, 3, 2])

    @trace(symbolic=True)
    def f(x):
        y = F.topk(x, 3)
        np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
        return y

    for i in range(3):
        f(x)


def test_trace_warp_perspective():
    inp_shape = (1, 1, 4, 4)
    x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
    M_shape = (1, 3, 3)
    M = tensor(
        np.array(
            [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
        ).reshape(M_shape)
    )

    @trace(symbolic=True)
    def f(x, M):
        out = F.warp_perspective(x, M, (2, 2))
        np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
        return out

    for i in range(1):
        f(x, M)


def test_raise_on_trace():
    step_count = 0
    catch_count = 0
    bad_step = 10

    class CatchMe(Exception):
        pass

    a = tensor([1, 2, 3, 4])
    b = tensor([5, 6, 7, 8])
    c = tensor([9, 0, 1, 2])

    @trace
    def add_abc(a, b, c):
        ps = a + b
        result = ps + c
        if step_count == bad_step:
            raise CatchMe("catch me")
        return result

    for i in range(100):
        try:
            d = add_abc(a, b, c)
        except CatchMe as e:
            catch_count += 1
        else:
            np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
        step_count += 1
    assert catch_count == 1


def test_trace_broadcast():
    for symbolic in [False, True]:
        x1 = tensor(np.random.randn(3, 1, 1))
        x2 = tensor(np.random.randn(1, 4, 1))
        x3 = tensor(np.random.randn(1, 1, 5))

        @trace(symbolic=symbolic, capture_as_const=True)
        def f(x):
            y = F.broadcast_to(x, (3, 4, 5))
            return y

        f(x1)
        f(x2)
        f(x3)


def test_trace_nms():
    def make_inputs(n):
        boxes = np.zeros((n, 4))
        boxes[:, :2] = np.random.rand(n, 2) * 100
        boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
        scores = np.random.rand(n)
        return tensor(boxes), tensor(scores)

    @trace(symbolic=False)
    def f(boxes, scores):
        # with tracing, max_output must be specified
        results = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
        # without tracing, max output can be inferred inside nms
        with exclude_from_trace():
            _ = F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
        return results

    f(*make_inputs(10))
    f(*make_inputs(20))
    f(*make_inputs(30))


def test_trace_valid_broadcast():
    x1 = tensor(np.random.randn(1, 1))
    x2 = tensor(np.random.randn(1, 2))
    shape = (tensor([2]),
completion: tensor([2])
api: megengine.tensor
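Spliced together, the open tuple in the prompt and the completion above give the statement this row is exercising: a target shape whose extents are tensors rather than Python ints, so the traced function can treat the shape as runtime data. Reusing the imports from the top of the prompt:

shape = (tensor([2]), tensor([2]))  # the completed statement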
prompt: ... shape = (tensor([2]), tensor([2])) @trace(symbolic=False) def f(x, shape): y =
completion: F.broadcast_to(x, shape)
api: megengine.functional.broadcast_to
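This row and its neighbours walk through the same test one token at a time; assembling their prompt tails and completions gives the whole of test_trace_valid_broadcast, reformatted here for readability:

def test_trace_valid_broadcast():
    x1 = tensor(np.random.randn(1, 1))
    x2 = tensor(np.random.randn(1, 2))
    shape = (tensor([2]), tensor([2]))

    @trace(symbolic=False)
    def f(x, shape):
        y = F.broadcast_to(x, shape)
        return y

    f(x1, shape)
    f(x2, shape)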
prompt: ... return y f(x1, shape) f(x2, shape) def test_clip(): x = tensor(np.random.randn(10, 10)) @trace(symbolic=True) def f(x, lower, upper): y =
completion: F.clip(x, lower, upper)
api: megengine.functional.clip
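The prompt truncates inside test_clip, so only the completed call is certain. A minimal sketch of how the function plausibly continues, reusing the file's imports; the bounds passed in the final line are illustrative assumptions, not values taken from the dataset:

x = tensor(np.random.randn(10, 10))

@trace(symbolic=True)
def f(x, lower, upper):
    y = F.clip(x, lower, upper)  # clamp elementwise into [lower, upper]
    return y

out = f(x, tensor(-1.0), tensor(1.0))  # assumed bounds; traced arguments are tensors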
prompt: ... def test_output_copy_trace(): class Simple(Module): def __init__(self): super().__init__() self.a =
completion: Parameter([1.0], dtype=np.float32)
api: megengine.Parameter
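megengine.Parameter marks module state as trainable. A minimal sketch of the distinction the completion relies on (the Scale module and its values are hypothetical; Module, Parameter, tensor and np come from the file's imports):

class Scale(Module):
    def __init__(self):
        super().__init__()
        self.w = Parameter([2.0], dtype=np.float32)  # registered as trainable state
        self.b = tensor([1.0])                       # plain tensor: just an attribute

    def forward(self, x):
        return x * self.w + self.b

assert len(list(Scale().parameters())) == 1  # only the Parameter is collected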
prompt: ... def forward(self, x): x = x * self.a # will result into a copy of output in grad x =
completion: F.exp(x)
api: megengine.functional.exp
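F.exp here is the same function as the bare exp imported at the top of the prompt (from megengine.functional import exp). A quick numerical sanity check against numpy, reusing the file's imports:

x = tensor(np.linspace(-1.0, 1.0, 5, dtype=np.float32))
np.testing.assert_allclose(F.exp(x).numpy(), np.exp(x.numpy()), rtol=1e-6)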
prompt: ... net = Simple() gm =
completion: GradManager()
api: megengine.autodiff.GradManager
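The completion only constructs the manager; in the prompt it is immediately chained with .attach(net.parameters()). A minimal standalone sketch of the attach/record/backward cycle (variable names and values are illustrative):

w = Parameter([1.0], dtype=np.float32)
gm = GradManager().attach([w])

with gm:                             # forward ops are recorded inside this block
    loss = (w * tensor([3.0])).sum()
    gm.backward(loss)                # populates w.grad

print(w.grad)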
prompt: ... def test_goptions_log_exp(): @trace(symbolic=True, opt_level=0, capture_as_const=True) def f(x): return log(
completion: exp(x)
api: megengine.functional.exp
prompt: ... def f(x): return log(exp(x)) @trace(symbolic=True, opt_level=1, capture_as_const=True) def g(x): return log(
completion: exp(x)
api: megengine.functional.exp
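These two rows complete f and g of the skipped test_goptions_log_exp, and the companion test_goptions_log_sum_exp above shows why the opt_level=1 rewrite matters numerically: an unoptimized graph really evaluates exp first, which overflows float32 for large inputs, while the optimizer may fold log∘exp away. A numpy-only illustration of that failure mode, using the same magnitude as the test:

x = np.float32(1.0e4)     # the val used by test_goptions_log_sum_exp
print(np.log(np.exp(x)))  # inf: exp(1e4) overflows float32, so log(exp(x)) is not finite
print(x)                  # 1e4: what the folded computation would return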
prompt: ... def test_optimize_for_inference_broadcast(): a = tensor(np.ones(1, dtype=np.float32)) @trace(capture_as_const=True, symbolic_shape=True) def f(): return a._broadcast(
completion: tensor([1, 10], dtype=np.int32)
api: megengine.tensor
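Tensor._broadcast is a private helper (note the leading underscore), and the shape completed here is a single int32 tensor rather than a tuple. A rough public-API version of the same call, under the assumption that F.broadcast_to accepts a tensor-valued shape the same way (an assumption, not something these rows demonstrate):

a = tensor(np.ones(1, dtype=np.float32))

@trace(capture_as_const=True, symbolic_shape=True)
def f():
    # assumed public equivalent of a._broadcast(tensor([1, 10], dtype=np.int32))
    return F.broadcast_to(a, tensor([1, 10], dtype=np.int32))

f()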
prompt: ... def test_trace_broadcast(): for symbolic in [False, True]: x1 = tensor(np.random.randn(3, 1, 1)) x2 = tensor(np.random.randn(1, 4, 1)) x3 = tensor(np.random.randn(1, 1, 5)) @trace(symbolic=symbolic, capture_as_const=True) def f(x): y =
completion: F.broadcast_to(x, (3, 4, 5))
api: megengine.functional.broadcast_to
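The three inputs this test feeds through f, with shapes (3, 1, 1), (1, 4, 1) and (1, 1, 5), all expand to the same (3, 4, 5) target: every source axis is either 1 or already equal to the target axis. The same rule holds for numpy's equivalent, which makes for a quick check:

for shape in [(3, 1, 1), (1, 4, 1), (1, 1, 5)]:
    out = np.broadcast_to(np.random.randn(*shape), (3, 4, 5))
    assert out.shape == (3, 4, 5)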
prompt: ... def test_trace_nms(): def make_inputs(n): boxes = np.zeros((n, 4)) boxes[:, :2] = np.random.rand(n, 2) * 100 boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100 scores = np.random.rand(n) return
tensor(boxes)
megengine.tensor
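The completion above wraps the numpy array built by `make_inputs` into a MegEngine tensor. One detail of `make_inputs` worth making explicit: the `+ 100` offset on the second coordinate pair guarantees every generated box is well-formed. A minimal numpy-only check (illustration, not part of the dataset):

import numpy as np

n = 5
boxes = np.zeros((n, 4))
boxes[:, :2] = np.random.rand(n, 2) * 100        # x1, y1 in [0, 100)
boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100  # x2, y2 in [100, 200)
assert (boxes[:, 2:] > boxes[:, :2]).all()       # x2 > x1 and y2 > y1 always hold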
… tensor(boxes),
tensor(scores)
megengine.tensor
… tensor(scores)

    @trace(symbolic=False)
    def f(boxes, scores):
        # with tracing, max_output must be specified
        results = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
        # without tracing, max output can be inferred inside nms
        with
exclude_from_trace()
megengine.jit.exclude_from_trace
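The predicted API here is `megengine.jit.exclude_from_trace`. As a quick sketch of its contract (assumed example, not dataset code): statements under the context manager are re-executed eagerly on every call instead of being baked into the traced graph, so Python-side state can still steer the computation.

from megengine import tensor
from megengine.jit import trace, exclude_from_trace

step = 0

@trace(symbolic=False)
def g(x):
    x = -x
    with exclude_from_trace():  # this block runs eagerly on every call
        if step % 2:            # Python state is re-read each time
            x = -x
    return x

for step in range(4):
    print(g(tensor([1])).numpy())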
… exclude_from_trace():
            _ =
F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
megengine.functional.nn.nms
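A side-by-side sketch of the two calling modes the comments in this prompt describe; the box and score values below are made up for illustration:

import numpy as np
import megengine.functional as F
from megengine import tensor

boxes = tensor(np.array([[0, 0, 10, 10], [1, 1, 11, 11]], dtype=np.float32))
scores = tensor(np.array([0.9, 0.8], dtype=np.float32))

# eager: the number of kept boxes can be inferred from the data
kept = F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
# traceable: the output size must be bounded up front via max_output
kept_fixed = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)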
… F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
        return results

    f(*make_inputs(10))
    f(*make_inputs(20))
    f(*make_inputs(30))


def test_trace_valid_broadcast():
    x1 = tensor(np.random.randn(1, 1))
    x2 = tensor(np.random.randn(1, 2))
    shape = (tensor([2]), tensor([2]))

    @trace(symbolic=False)
    def f(x, shape):
        y = F.broadcast_to(x, shape)
        return y

    f(x1, shape)
    f(x2, shape)


def test_clip():
    x = tensor(np.random.randn(10, 10))

    @trace(symbolic=True)
    def f(x, lower, upper):
        y = F.clip(x, lower, upper)
        return y

    for i in range(3):
        f(x,
tensor([0])
megengine.tensor
… tensor([0]),
tensor([1])
megengine.tensor
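Rows like this one feed the clip bounds as tensors rather than Python scalars; that keeps them as graph inputs, so the same traced function can be replayed with different bounds. A minimal sketch of that reuse (assumed values):

import numpy as np
import megengine.functional as F
from megengine import tensor
from megengine.jit import trace

@trace(symbolic=True)
def clip(x, lower, upper):
    return F.clip(x, lower, upper)

x = tensor(np.random.randn(10, 10))
y01 = clip(x, tensor([0]), tensor([1]))  # first call records the graph
y02 = clip(x, tensor([0]), tensor([2]))  # bounds are inputs, so no retrace is needed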
… tensor([1]))


# test returning noncontiguous tensor from trace
def test_slice():
    @trace
    def f(x):
        return x[:, 1::2]

    x =
F.arange(8)
megengine.functional.arange
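The `x[:, 1::2]` slice in `test_slice` is the interesting part: a step-2 column slice is a strided view, so the tensor the trace returns is noncontiguous. The numpy analogy below shows why (illustration only):

import numpy as np

x = np.arange(8).reshape(2, 4)
view = x[:, 1::2]                  # columns 1 and 3
print(view.flags["C_CONTIGUOUS"])  # False: the stride along axis 1 is doubled
np.testing.assert_array_equal(view, [[1, 3], [5, 7]])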
out = F.warp_perspective(x, M, (2, 2)) np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2])) return out for i in range(1): f(x, M) def test_raise_on_trace(): step_count = 0 catch_count = 0 bad_step = 10 class CatchMe(Exception): pass a = tensor([1, 2, 3, 4]) b = tensor([5, 6, 7, 8]) c = tensor([9, 0, 1, 2]) @trace def add_abc(a, b, c): ps = a + b result = ps + c if step_count == bad_step: raise CatchMe("catch me") return result for i in range(100): try: d = add_abc(a, b, c) except CatchMe as e: catch_count += 1 else: np.testing.assert_equal(d.numpy(), (a + b + c).numpy()) step_count += 1 assert catch_count == 1 def test_trace_broadcast(): for symbolic in [False, True]: x1 = tensor(np.random.randn(3, 1, 1)) x2 = tensor(np.random.randn(1, 4, 1)) x3 = tensor(np.random.randn(1, 1, 5)) @trace(symbolic=symbolic, capture_as_const=True) def f(x): y = F.broadcast_to(x, (3, 4, 5)) return y f(x1) f(x2) f(x3) def test_trace_nms(): def make_inputs(n): boxes = np.zeros((n, 4)) boxes[:, :2] = np.random.rand(n, 2) * 100 boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100 scores = np.random.rand(n) return tensor(boxes), tensor(scores) @trace(symbolic=False) def f(boxes, scores): # with tracing, max_output must be specified results = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20) # without tracing, max output can be inferred inside nms with exclude_from_trace(): _ = F.nn.nms(boxes, scores=scores, iou_thresh=0.5) return results f(*make_inputs(10)) f(*make_inputs(20)) f(*make_inputs(30)) def test_trace_valid_broadcast(): x1 = tensor(np.random.randn(1, 1)) x2 = tensor(np.random.randn(1, 2)) shape = (tensor([2]), tensor([2])) @trace(symbolic=False) def f(x, shape): y = F.broadcast_to(x, shape) return y f(x1, shape) f(x2, shape) def test_clip(): x = tensor(np.random.randn(10, 10)) @trace(symbolic=True) def f(x, lower, upper): y = F.clip(x, lower, upper) return y for i in range(3): f(x, tensor([0]), tensor([1])) # test returning noncontiguous tensor from trace def test_slice(): @trace def f(x): return x[:, 1::2] x = F.arange(8).reshape(2, 4) f(x) y = f(x) np.testing.assert_array_equal(y.numpy(), x.numpy()[:, 1::2]) y + y def test_random(): def run_test(op): for symbolic_shape in [True, False]: @
trace(symbolic=True, symbolic_shape=symbolic_shape)
megengine.jit.trace
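For reference, a minimal usage sketch of the completed API, megengine.jit.trace, following the same pattern as the test code in the prompt above (assumes MegEngine is installed):

    import numpy as np
    from megengine import tensor
    from megengine.jit import trace

    @trace(symbolic=True)  # compile f into a static graph on the first call
    def f(x):
        return -x

    x = tensor([1])
    y = f(x).numpy()                           # first call traces and executes
    np.testing.assert_equal(f(x).numpy(), y)   # later calls replay the trace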
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import io from tempfile import mkstemp import numpy as np import pytest import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.optimizer as optim import megengine.utils.comp_graph_tools as cgtools from megengine import Parameter, tensor from megengine.autodiff import GradManager from megengine.core._trace_option import set_symbolic_shape from megengine.core.ops import builtin as ops from megengine.core.ops.builtin import Elemwise from megengine.core.tensor.utils import isscalar from megengine.functional import exp, log from megengine.jit import exclude_from_trace, trace from megengine.module import Module from megengine.random import normal, uniform def test_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): return -x x = tensor([1]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) def test_output_copy_trace(): class Simple(Module): def __init__(self): super().__init__() self.a = Parameter([1.0], dtype=np.float32) def forward(self, x): x = x * self.a # will result into a copy of output in grad x = F.exp(x) return x net = Simple() gm = GradManager().attach(net.parameters()) opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9) data = tensor(np.arange(4).reshape(2, 2), dtype="float32") @trace(symbolic=False) def train_f1(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss @trace(symbolic=True) def train_f2(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss for i in range(2): y1 = train_f1(data).numpy() y2 = train_f2(data).numpy() np.testing.assert_equal(y1, y2) def test_exclude_from_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): x = -x with
exclude_from_trace()
megengine.jit.exclude_from_trace
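A minimal sketch of the completed API, megengine.jit.exclude_from_trace, mirroring test_exclude_from_trace above: statements inside the context manager run eagerly on every call instead of being baked into the trace:

    from megengine import tensor
    from megengine.jit import exclude_from_trace, trace

    keep_sign = True  # Python-level state read on each call

    @trace(symbolic=True)
    def f(x):
        x = -x
        with exclude_from_trace():  # re-evaluated each call, not traced
            if keep_sign:           # dynamic control flow stays legal here
                x = -x
        return x

    print(f(tensor([1])).numpy())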
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import io from tempfile import mkstemp import numpy as np import pytest import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.optimizer as optim import megengine.utils.comp_graph_tools as cgtools from megengine import Parameter, tensor from megengine.autodiff import GradManager from megengine.core._trace_option import set_symbolic_shape from megengine.core.ops import builtin as ops from megengine.core.ops.builtin import Elemwise from megengine.core.tensor.utils import isscalar from megengine.functional import exp, log from megengine.jit import exclude_from_trace, trace from megengine.module import Module from megengine.random import normal, uniform def test_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): return -x x = tensor([1]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) def test_output_copy_trace(): class Simple(Module): def __init__(self): super().__init__() self.a = Parameter([1.0], dtype=np.float32) def forward(self, x): x = x * self.a # will result into a copy of output in grad x = F.exp(x) return x net = Simple() gm = GradManager().attach(net.parameters()) opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9) data = tensor(np.arange(4).reshape(2, 2), dtype="float32") @trace(symbolic=False) def train_f1(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss @trace(symbolic=True) def train_f2(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss for i in range(2): y1 = train_f1(data).numpy() y2 = train_f2(data).numpy() np.testing.assert_equal(y1, y2) def test_exclude_from_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): x = -x with exclude_from_trace(): if i % 2: x = -x x = -x return x x = tensor([1]) for i in range(3): y = f(x).numpy() np.testing.assert_equal(f(x).numpy(), y) def test_print_in_trace(): for symbolic in [False]: # cannot read value in symbolic mode @trace(symbolic=symbolic) def f(x): nonlocal buf x = -x buf = x.numpy() x = -x return x buf = None x = tensor([1]) for i in range(3): y = f(x).numpy() z = buf buf = None np.testing.assert_equal(f(x).numpy(), y) np.testing.assert_equal(z, buf) def test_dump(): @trace(symbolic=True, capture_as_const=True) def f(a, b): return a + b a = tensor([2]) b = tensor([4]) y = f(a, b).numpy() for i in range(3): np.testing.assert_equal(f(a, b).numpy(), y) file = io.BytesIO() dump_info = f.dump(file) assert dump_info.nr_opr == 3 np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"]) np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"]) file.seek(0) result = cgtools.load_and_inference(file, [a, b]) np.testing.assert_equal(result[0], y) def test_capture_dump(): a = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x * a x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file) file.seek(0) result = cgtools.load_and_inference(file, [x]) np.testing.assert_equal(result[0], y) def test_dump_volatile(): p = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x 
* p x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file, optimize_for_inference=False) file.seek(0) cg, _, outputs = G.load_graph(file) (out,) = outputs assert ( cgtools.get_owner_opr_type(
cgtools.get_owner_opr_inputs(out)
megengine.utils.comp_graph_tools.get_owner_opr_inputs
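The completion above navigates a dumped graph; a sketch of the full round trip, reusing the test_dump_volatile pattern from the prompt: the captured tensor p is serialized as a constant, which get_owner_opr_inputs/get_owner_opr_type can verify:

    import io

    import megengine.core.tensor.megbrain_graph as G
    import megengine.utils.comp_graph_tools as cgtools
    from megengine import tensor
    from megengine.jit import trace

    p = tensor([2])

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return x * p

    f(tensor([3]))
    file = io.BytesIO()
    f.dump(file, optimize_for_inference=False)
    file.seek(0)
    _, _, outputs = G.load_graph(file)
    (out,) = outputs
    # second input of the multiply is the captured constant
    print(cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1]))  # "ImmutableTensor"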
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import io from tempfile import mkstemp import numpy as np import pytest import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.optimizer as optim import megengine.utils.comp_graph_tools as cgtools from megengine import Parameter, tensor from megengine.autodiff import GradManager from megengine.core._trace_option import set_symbolic_shape from megengine.core.ops import builtin as ops from megengine.core.ops.builtin import Elemwise from megengine.core.tensor.utils import isscalar from megengine.functional import exp, log from megengine.jit import exclude_from_trace, trace from megengine.module import Module from megengine.random import normal, uniform def test_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): return -x x = tensor([1]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) def test_output_copy_trace(): class Simple(Module): def __init__(self): super().__init__() self.a = Parameter([1.0], dtype=np.float32) def forward(self, x): x = x * self.a # will result into a copy of output in grad x = F.exp(x) return x net = Simple() gm = GradManager().attach(net.parameters()) opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9) data = tensor(np.arange(4).reshape(2, 2), dtype="float32") @trace(symbolic=False) def train_f1(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss @trace(symbolic=True) def train_f2(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss for i in range(2): y1 = train_f1(data).numpy() y2 = train_f2(data).numpy() np.testing.assert_equal(y1, y2) def test_exclude_from_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): x = -x with exclude_from_trace(): if i % 2: x = -x x = -x return x x = tensor([1]) for i in range(3): y = f(x).numpy() np.testing.assert_equal(f(x).numpy(), y) def test_print_in_trace(): for symbolic in [False]: # cannot read value in symbolic mode @trace(symbolic=symbolic) def f(x): nonlocal buf x = -x buf = x.numpy() x = -x return x buf = None x = tensor([1]) for i in range(3): y = f(x).numpy() z = buf buf = None np.testing.assert_equal(f(x).numpy(), y) np.testing.assert_equal(z, buf) def test_dump(): @trace(symbolic=True, capture_as_const=True) def f(a, b): return a + b a = tensor([2]) b = tensor([4]) y = f(a, b).numpy() for i in range(3): np.testing.assert_equal(f(a, b).numpy(), y) file = io.BytesIO() dump_info = f.dump(file) assert dump_info.nr_opr == 3 np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"]) np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"]) file.seek(0) result = cgtools.load_and_inference(file, [a, b]) np.testing.assert_equal(result[0], y) def test_capture_dump(): a = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x * a x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file) file.seek(0) result = cgtools.load_and_inference(file, [x]) np.testing.assert_equal(result[0], y) def test_dump_volatile(): p = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x 
* p x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file, optimize_for_inference=False) file.seek(0) cg, _, outputs = G.load_graph(file) (out,) = outputs assert ( cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1]) == "ImmutableTensor" ) def test_trace_profiler(): for symbolic in [False, True]: @trace(symbolic=symbolic, profiling=True) def f(x): return -x x = tensor([1]) y = f(x).numpy() f(x) f(x) # XXX: has to run twice out = f.get_profile() assert out.get("profiler") @pytest.mark.skip(reason="force opt_level=0 when building graph") def test_goptions(): @trace(symbolic=True, opt_level=0, capture_as_const=True) def f(x): # directly return x / x will not trigger gopt # since there's no way to tell the two x are the same y = 2.0 * x return y / y @trace(symbolic=True, opt_level=1, capture_as_const=True) def g(x): y = 2.0 * x return y / y d = tensor(0.0) assert not np.isfinite(f(d).numpy()) np.testing.assert_equal(g(d).numpy().item(), 1.0) @pytest.mark.skip(reason="force opt_level=0 when building graph") def test_goptions_log_sum_exp(): @trace(symbolic=True, opt_level=0, capture_as_const=True) def f(x, y): return log(
exp(x)
megengine.functional.exp
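The completion here is plain elementwise exponentiation; a one-liner for context:

    import numpy as np
    from megengine import tensor
    from megengine.functional import exp

    x = tensor(np.array([0.0, 1.0], dtype=np.float32))
    print(exp(x).numpy())  # elementwise e**x -> approx [1.0, 2.7182817]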
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import io from tempfile import mkstemp import numpy as np import pytest import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.optimizer as optim import megengine.utils.comp_graph_tools as cgtools from megengine import Parameter, tensor from megengine.autodiff import GradManager from megengine.core._trace_option import set_symbolic_shape from megengine.core.ops import builtin as ops from megengine.core.ops.builtin import Elemwise from megengine.core.tensor.utils import isscalar from megengine.functional import exp, log from megengine.jit import exclude_from_trace, trace from megengine.module import Module from megengine.random import normal, uniform def test_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): return -x x = tensor([1]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) def test_output_copy_trace(): class Simple(Module): def __init__(self): super().__init__() self.a = Parameter([1.0], dtype=np.float32) def forward(self, x): x = x * self.a # will result into a copy of output in grad x = F.exp(x) return x net = Simple() gm = GradManager().attach(net.parameters()) opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9) data = tensor(np.arange(4).reshape(2, 2), dtype="float32") @trace(symbolic=False) def train_f1(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss @trace(symbolic=True) def train_f2(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss for i in range(2): y1 = train_f1(data).numpy() y2 = train_f2(data).numpy() np.testing.assert_equal(y1, y2) def test_exclude_from_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): x = -x with exclude_from_trace(): if i % 2: x = -x x = -x return x x = tensor([1]) for i in range(3): y = f(x).numpy() np.testing.assert_equal(f(x).numpy(), y) def test_print_in_trace(): for symbolic in [False]: # cannot read value in symbolic mode @trace(symbolic=symbolic) def f(x): nonlocal buf x = -x buf = x.numpy() x = -x return x buf = None x = tensor([1]) for i in range(3): y = f(x).numpy() z = buf buf = None np.testing.assert_equal(f(x).numpy(), y) np.testing.assert_equal(z, buf) def test_dump(): @trace(symbolic=True, capture_as_const=True) def f(a, b): return a + b a = tensor([2]) b = tensor([4]) y = f(a, b).numpy() for i in range(3): np.testing.assert_equal(f(a, b).numpy(), y) file = io.BytesIO() dump_info = f.dump(file) assert dump_info.nr_opr == 3 np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"]) np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"]) file.seek(0) result = cgtools.load_and_inference(file, [a, b]) np.testing.assert_equal(result[0], y) def test_capture_dump(): a = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x * a x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file) file.seek(0) result = cgtools.load_and_inference(file, [x]) np.testing.assert_equal(result[0], y) def test_dump_volatile(): p = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x 
* p x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file, optimize_for_inference=False) file.seek(0) cg, _, outputs = G.load_graph(file) (out,) = outputs assert ( cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1]) == "ImmutableTensor" ) def test_trace_profiler(): for symbolic in [False, True]: @trace(symbolic=symbolic, profiling=True) def f(x): return -x x = tensor([1]) y = f(x).numpy() f(x) f(x) # XXX: has to run twice out = f.get_profile() assert out.get("profiler") @pytest.mark.skip(reason="force opt_level=0 when building graph") def test_goptions(): @trace(symbolic=True, opt_level=0, capture_as_const=True) def f(x): # directly return x / x will not trigger gopt # since there's no way to tell the two x are the same y = 2.0 * x return y / y @trace(symbolic=True, opt_level=1, capture_as_const=True) def g(x): y = 2.0 * x return y / y d = tensor(0.0) assert not np.isfinite(f(d).numpy()) np.testing.assert_equal(g(d).numpy().item(), 1.0) @pytest.mark.skip(reason="force opt_level=0 when building graph") def test_goptions_log_sum_exp(): @trace(symbolic=True, opt_level=0, capture_as_const=True) def f(x, y): return log(exp(x) +
exp(y)
megengine.functional.exp
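The prompt above also exercises profiling of a traced function; a sketch of that pattern, grounded in test_trace_profiler (the structure of the returned profile is version-dependent, hence only the key check):

    from megengine import tensor
    from megengine.jit import trace

    @trace(symbolic=True, profiling=True)
    def f(x):
        return -x

    x = tensor([1])
    f(x)
    f(x)  # per the test's comment, the trace has to run twice before profiling data exists
    assert f.get_profile().get("profiler")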
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import io from tempfile import mkstemp import numpy as np import pytest import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.optimizer as optim import megengine.utils.comp_graph_tools as cgtools from megengine import Parameter, tensor from megengine.autodiff import GradManager from megengine.core._trace_option import set_symbolic_shape from megengine.core.ops import builtin as ops from megengine.core.ops.builtin import Elemwise from megengine.core.tensor.utils import isscalar from megengine.functional import exp, log from megengine.jit import exclude_from_trace, trace from megengine.module import Module from megengine.random import normal, uniform def test_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): return -x x = tensor([1]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) def test_output_copy_trace(): class Simple(Module): def __init__(self): super().__init__() self.a = Parameter([1.0], dtype=np.float32) def forward(self, x): x = x * self.a # will result into a copy of output in grad x = F.exp(x) return x net = Simple() gm = GradManager().attach(net.parameters()) opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9) data = tensor(np.arange(4).reshape(2, 2), dtype="float32") @trace(symbolic=False) def train_f1(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss @trace(symbolic=True) def train_f2(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss for i in range(2): y1 = train_f1(data).numpy() y2 = train_f2(data).numpy() np.testing.assert_equal(y1, y2) def test_exclude_from_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): x = -x with exclude_from_trace(): if i % 2: x = -x x = -x return x x = tensor([1]) for i in range(3): y = f(x).numpy() np.testing.assert_equal(f(x).numpy(), y) def test_print_in_trace(): for symbolic in [False]: # cannot read value in symbolic mode @trace(symbolic=symbolic) def f(x): nonlocal buf x = -x buf = x.numpy() x = -x return x buf = None x = tensor([1]) for i in range(3): y = f(x).numpy() z = buf buf = None np.testing.assert_equal(f(x).numpy(), y) np.testing.assert_equal(z, buf) def test_dump(): @trace(symbolic=True, capture_as_const=True) def f(a, b): return a + b a = tensor([2]) b = tensor([4]) y = f(a, b).numpy() for i in range(3): np.testing.assert_equal(f(a, b).numpy(), y) file = io.BytesIO() dump_info = f.dump(file) assert dump_info.nr_opr == 3 np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"]) np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"]) file.seek(0) result = cgtools.load_and_inference(file, [a, b]) np.testing.assert_equal(result[0], y) def test_capture_dump(): a = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x * a x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file) file.seek(0) result = cgtools.load_and_inference(file, [x]) np.testing.assert_equal(result[0], y) def test_dump_volatile(): p = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x 
* p x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file, optimize_for_inference=False) file.seek(0) cg, _, outputs = G.load_graph(file) (out,) = outputs assert ( cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1]) == "ImmutableTensor" ) def test_trace_profiler(): for symbolic in [False, True]: @trace(symbolic=symbolic, profiling=True) def f(x): return -x x = tensor([1]) y = f(x).numpy() f(x) f(x) # XXX: has to run twice out = f.get_profile() assert out.get("profiler") @pytest.mark.skip(reason="force opt_level=0 when building graph") def test_goptions(): @trace(symbolic=True, opt_level=0, capture_as_const=True) def f(x): # directly return x / x will not trigger gopt # since there's no way to tell the two x are the same y = 2.0 * x return y / y @trace(symbolic=True, opt_level=1, capture_as_const=True) def g(x): y = 2.0 * x return y / y d = tensor(0.0) assert not np.isfinite(f(d).numpy()) np.testing.assert_equal(g(d).numpy().item(), 1.0) @pytest.mark.skip(reason="force opt_level=0 when building graph") def test_goptions_log_sum_exp(): @trace(symbolic=True, opt_level=0, capture_as_const=True) def f(x, y): return log(exp(x) + exp(y)) @trace(symbolic=True, opt_level=1, capture_as_const=True) def g(x, y): return log(
exp(x)
megengine.functional.exp
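test_goptions_log_exp in the prompt compares operator counts between opt levels; a sketch of the dump-and-inspect half of that workflow:

    from tempfile import mkstemp

    import megengine.core.tensor.megbrain_graph as G
    import megengine.utils.comp_graph_tools as cgtools
    from megengine import tensor
    from megengine.functional import exp, log
    from megengine.jit import trace

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return log(exp(x))  # a higher opt_level may fold this pair away

    f(tensor(1.0))
    _, path = mkstemp()
    f.dump(path, optimize_for_inference=False)
    *_, outputs = G.load_graph(path)
    print(len(cgtools.get_oprs_seq(outputs)))  # operator count of the dumped graph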
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import io from tempfile import mkstemp import numpy as np import pytest import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.optimizer as optim import megengine.utils.comp_graph_tools as cgtools from megengine import Parameter, tensor from megengine.autodiff import GradManager from megengine.core._trace_option import set_symbolic_shape from megengine.core.ops import builtin as ops from megengine.core.ops.builtin import Elemwise from megengine.core.tensor.utils import isscalar from megengine.functional import exp, log from megengine.jit import exclude_from_trace, trace from megengine.module import Module from megengine.random import normal, uniform def test_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): return -x x = tensor([1]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) def test_output_copy_trace(): class Simple(Module): def __init__(self): super().__init__() self.a = Parameter([1.0], dtype=np.float32) def forward(self, x): x = x * self.a # will result into a copy of output in grad x = F.exp(x) return x net = Simple() gm = GradManager().attach(net.parameters()) opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9) data = tensor(np.arange(4).reshape(2, 2), dtype="float32") @trace(symbolic=False) def train_f1(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss @trace(symbolic=True) def train_f2(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss for i in range(2): y1 = train_f1(data).numpy() y2 = train_f2(data).numpy() np.testing.assert_equal(y1, y2) def test_exclude_from_trace(): for symbolic in [False, True]: @trace(symbolic=symbolic) def f(x): x = -x with exclude_from_trace(): if i % 2: x = -x x = -x return x x = tensor([1]) for i in range(3): y = f(x).numpy() np.testing.assert_equal(f(x).numpy(), y) def test_print_in_trace(): for symbolic in [False]: # cannot read value in symbolic mode @trace(symbolic=symbolic) def f(x): nonlocal buf x = -x buf = x.numpy() x = -x return x buf = None x = tensor([1]) for i in range(3): y = f(x).numpy() z = buf buf = None np.testing.assert_equal(f(x).numpy(), y) np.testing.assert_equal(z, buf) def test_dump(): @trace(symbolic=True, capture_as_const=True) def f(a, b): return a + b a = tensor([2]) b = tensor([4]) y = f(a, b).numpy() for i in range(3): np.testing.assert_equal(f(a, b).numpy(), y) file = io.BytesIO() dump_info = f.dump(file) assert dump_info.nr_opr == 3 np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"]) np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"]) file.seek(0) result = cgtools.load_and_inference(file, [a, b]) np.testing.assert_equal(result[0], y) def test_capture_dump(): a = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x * a x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file) file.seek(0) result = cgtools.load_and_inference(file, [x]) np.testing.assert_equal(result[0], y) def test_dump_volatile(): p = tensor([2]) @trace(symbolic=True, capture_as_const=True) def f(x): return x 
* p x = tensor([3]) y = f(x).numpy() for i in range(3): np.testing.assert_equal(f(x).numpy(), y) file = io.BytesIO() f.dump(file, optimize_for_inference=False) file.seek(0) cg, _, outputs = G.load_graph(file) (out,) = outputs assert ( cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1]) == "ImmutableTensor" ) def test_trace_profiler(): for symbolic in [False, True]: @trace(symbolic=symbolic, profiling=True) def f(x): return -x x = tensor([1]) y = f(x).numpy() f(x) f(x) # XXX: has to run twice out = f.get_profile() assert out.get("profiler") @pytest.mark.skip(reason="force opt_level=0 when building graph") def test_goptions(): @trace(symbolic=True, opt_level=0, capture_as_const=True) def f(x): # directly return x / x will not trigger gopt # since there's no way to tell the two x are the same y = 2.0 * x return y / y @trace(symbolic=True, opt_level=1, capture_as_const=True) def g(x): y = 2.0 * x return y / y d = tensor(0.0) assert not np.isfinite(f(d).numpy()) np.testing.assert_equal(g(d).numpy().item(), 1.0) @pytest.mark.skip(reason="force opt_level=0 when building graph") def test_goptions_log_sum_exp(): @trace(symbolic=True, opt_level=0, capture_as_const=True) def f(x, y): return log(exp(x) + exp(y)) @trace(symbolic=True, opt_level=1, capture_as_const=True) def g(x, y): return log(exp(x) +
exp(y)
megengine.functional.exp
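Both goptions tests turn on the numerical instability of log(exp(x) + exp(y)) when x is large; for completeness, MegEngine also exposes a fused, stable reduction for this. A sketch, assuming megengine.functional.logsumexp with the usual axis argument:

    import numpy as np

    import megengine.functional as F
    from megengine import tensor

    x = tensor(np.array([1.0e4, 0.0], dtype=np.float32))
    print(F.logsumexp(x, axis=0).numpy())  # ~10000.0; the naive log(exp(...)) overflows to inf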
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020 <NAME> # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """EfficientNet Series EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_ References: https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py """ import math from numbers import Real from typing import Any, Callable, Mapping, Sequence, Union import megengine.hub as hub import megengine.module as M from basecls.layers import ( SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d, ) from basecls.utils import recursive_update, registers from .mbnet import MBConv from .resnet import AnyStage, SimpleStem __all__ = ["FuseMBConv", "EffNet"] class FuseMBConv(M.Module): """Fusing the proj conv1x1 and depthwise conv into a conv2d. Args: w_in: input width. w_out: output width. stride: stride of conv. kernel: kernel of conv. exp_r: expansion ratio. se_r: SE ratio. has_skip: whether apply skip connection. drop_path_prob: drop path probability. norm_name: normalization function. act_name: activation function. """ def __init__( self, w_in: int, w_out: int, stride: int, kernel: int, exp_r: float, se_r: float, has_skip: bool, drop_path_prob: float, norm_name: str, act_name: str, **kwargs, ): super().__init__() # Expansion w_mid = w_in w_exp = int(w_in * exp_r) if exp_r != 1.0: self.exp = conv2d(w_in, w_exp, kernel, stride=stride) self.exp_bn = norm2d(norm_name, w_exp) self.exp_act = activation(act_name) w_mid = w_exp # SE if se_r > 0.0: w_se = int(w_in * se_r) self.se = SE(w_mid, w_se, act_name) # PWConv self.proj = conv2d( w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride ) self.proj_bn = norm2d(norm_name, w_out) self.has_proj_act = exp_r == 1.0 if self.has_proj_act: self.proj_act = activation(act_name) # Skip self.has_skip = has_skip and w_in == w_out and stride == 1 if self.has_skip: self.drop_path = DropPath(drop_path_prob) def forward(self, x): x_p = x if getattr(self, "exp", None) is not None: x = self.exp(x) x = self.exp_bn(x) x = self.exp_act(x) if getattr(self, "se", None) is not None: x = self.se(x) x = self.proj(x) x = self.proj_bn(x) if self.has_proj_act: x = self.proj_act(x) if self.has_skip: x = self.drop_path(x) x = x + x_p return x @registers.models.register() class EffNet(M.Module): """EfficientNet model. Args: stem_w: stem width. block_name: block name. depths: depth for each stage (number of blocks in the stage). widths: width for each stage (width of each block in the stage). strides: strides for each stage (applies to the first block of each stage). kernels: kernel sizes for each stage. exp_rs: expansion ratios for MBConv blocks in each stage. se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.25`` drop_path_prob: drop path probability. Default: ``0.0`` depth_mult: depth multiplier. Default: ``1.0`` width_mult: width multiplier. Default: ``1.0`` omit_mult: omit multiplier for stem width, head width, the first stage depth and the last stage depth, enabled in EfficientNet-Lite. Default: ``False`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. 
Default: ``"silu"`` head: head args. Default: ``None`` """ def __init__( self, stem_w: int, block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]], depths: Sequence[int], widths: Sequence[int], strides: Sequence[int], kernels: Sequence[int], exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0, se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0, drop_path_prob: float = 0.0, depth_mult: float = 1.0, width_mult: float = 1.0, omit_mult: bool = False, norm_name: str = "BN", act_name: str = "silu", head: Mapping[str, Any] = None, ): super().__init__() depths = [ d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult) for i, d in enumerate(depths) ] self.depths = depths stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9) self.stem = SimpleStem(3, stem_w, norm_name, act_name) if isinstance(block_name, (str, Callable)): block_name = [block_name] * len(depths) block_func = [self.get_block_func(bn) for bn in block_name] widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths] if isinstance(exp_rs, Real): exp_rs = [exp_rs] * len(depths) if isinstance(se_rs, Real): se_rs = [se_rs] * len(depths) drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths))) drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths] model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs] prev_w = stem_w for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)): stage = AnyStage( prev_w, w, s, d, bf, kernel=k, exp_r=exp_r, se_r=se_r, se_from_exp=False, se_act_name=act_name, se_approx=False, se_rd_fn=int, has_proj_act=False, has_skip=True, drop_path_prob=dp_p, norm_name=norm_name, act_name=act_name, ) setattr(self, f"s{i + 1}", stage) prev_w = w if head: if head.get("width", 0) > 0 and not omit_mult: head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9) self.head = build_head(prev_w, head, norm_name, act_name) self.apply(init_weights) def forward(self, x): x = self.stem(x) for i in range(len(self.depths)): stage = getattr(self, f"s{i + 1}") x = stage(x) if getattr(self, "head", None) is not None: x = self.head(x) return x @staticmethod def get_block_func(name: Union[str, Callable]): """Retrieves the block function by name.""" if callable(name): return name if isinstance(name, str): block_funcs = { "FuseMBConv": FuseMBConv, "MBConv": MBConv, } if name in block_funcs.keys(): return block_funcs[name] raise ValueError(f"Block '{name}' not supported") def _build_effnet(**kwargs): model_args = dict( stem_w=32, block_name=MBConv, depths=[1, 2, 2, 3, 3, 4, 1], widths=[16, 24, 40, 80, 112, 192, 320], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 5, 3, 5, 5, 3], exp_rs=[1, 6, 6, 6, 6, 6, 6], se_rs=0.25, drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2), ) recursive_update(model_args, kwargs) return EffNet(**model_args) def _build_effnet_lite(**kwargs): model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6") recursive_update(model_args, kwargs) return _build_effnet(**model_args) def _build_effnetv2(**kwargs): model_args = dict( stem_w=32, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv], depths=[1, 2, 2, 3, 5, 8], widths=[16, 32, 48, 96, 112, 192], strides=[1, 2, 2, 2, 1, 2], kernels=[3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25], ) recursive_update(model_args, kwargs) return _build_effnet(**model_args) 
@registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl" )
megengine.hub.pretrained
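A hedged usage sketch for the completed API, megengine.hub.pretrained: the decorator binds a weight URL to a model-builder function so callers can opt in to loading the weights (the pretrained=True keyword follows MegEngine's documented hub pattern; the URL and model below are placeholders, not real artifacts):

    import megengine.hub as hub
    import megengine.module as M

    @hub.pretrained("https://example.com/weights.pkl")  # hypothetical URL
    def tiny_model():
        return M.Sequential(M.Linear(4, 2))

    m = tiny_model(pretrained=True)   # fetches the URL and loads the state dict
    m_rand = tiny_model()             # plain build with random initialization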
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020 <NAME> # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """EfficientNet Series EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_ References: https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py """ import math from numbers import Real from typing import Any, Callable, Mapping, Sequence, Union import megengine.hub as hub import megengine.module as M from basecls.layers import ( SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d, ) from basecls.utils import recursive_update, registers from .mbnet import MBConv from .resnet import AnyStage, SimpleStem __all__ = ["FuseMBConv", "EffNet"] class FuseMBConv(M.Module): """Fusing the proj conv1x1 and depthwise conv into a conv2d. Args: w_in: input width. w_out: output width. stride: stride of conv. kernel: kernel of conv. exp_r: expansion ratio. se_r: SE ratio. has_skip: whether apply skip connection. drop_path_prob: drop path probability. norm_name: normalization function. act_name: activation function. """ def __init__( self, w_in: int, w_out: int, stride: int, kernel: int, exp_r: float, se_r: float, has_skip: bool, drop_path_prob: float, norm_name: str, act_name: str, **kwargs, ): super().__init__() # Expansion w_mid = w_in w_exp = int(w_in * exp_r) if exp_r != 1.0: self.exp = conv2d(w_in, w_exp, kernel, stride=stride) self.exp_bn = norm2d(norm_name, w_exp) self.exp_act = activation(act_name) w_mid = w_exp # SE if se_r > 0.0: w_se = int(w_in * se_r) self.se = SE(w_mid, w_se, act_name) # PWConv self.proj = conv2d( w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride ) self.proj_bn = norm2d(norm_name, w_out) self.has_proj_act = exp_r == 1.0 if self.has_proj_act: self.proj_act = activation(act_name) # Skip self.has_skip = has_skip and w_in == w_out and stride == 1 if self.has_skip: self.drop_path = DropPath(drop_path_prob) def forward(self, x): x_p = x if getattr(self, "exp", None) is not None: x = self.exp(x) x = self.exp_bn(x) x = self.exp_act(x) if getattr(self, "se", None) is not None: x = self.se(x) x = self.proj(x) x = self.proj_bn(x) if self.has_proj_act: x = self.proj_act(x) if self.has_skip: x = self.drop_path(x) x = x + x_p return x @registers.models.register() class EffNet(M.Module): """EfficientNet model. Args: stem_w: stem width. block_name: block name. depths: depth for each stage (number of blocks in the stage). widths: width for each stage (width of each block in the stage). strides: strides for each stage (applies to the first block of each stage). kernels: kernel sizes for each stage. exp_rs: expansion ratios for MBConv blocks in each stage. se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.25`` drop_path_prob: drop path probability. Default: ``0.0`` depth_mult: depth multiplier. Default: ``1.0`` width_mult: width multiplier. Default: ``1.0`` omit_mult: omit multiplier for stem width, head width, the first stage depth and the last stage depth, enabled in EfficientNet-Lite. Default: ``False`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. 
Default: ``"silu"`` head: head args. Default: ``None`` """ def __init__( self, stem_w: int, block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]], depths: Sequence[int], widths: Sequence[int], strides: Sequence[int], kernels: Sequence[int], exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0, se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0, drop_path_prob: float = 0.0, depth_mult: float = 1.0, width_mult: float = 1.0, omit_mult: bool = False, norm_name: str = "BN", act_name: str = "silu", head: Mapping[str, Any] = None, ): super().__init__() depths = [ d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult) for i, d in enumerate(depths) ] self.depths = depths stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9) self.stem = SimpleStem(3, stem_w, norm_name, act_name) if isinstance(block_name, (str, Callable)): block_name = [block_name] * len(depths) block_func = [self.get_block_func(bn) for bn in block_name] widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths] if isinstance(exp_rs, Real): exp_rs = [exp_rs] * len(depths) if isinstance(se_rs, Real): se_rs = [se_rs] * len(depths) drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths))) drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths] model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs] prev_w = stem_w for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)): stage = AnyStage( prev_w, w, s, d, bf, kernel=k, exp_r=exp_r, se_r=se_r, se_from_exp=False, se_act_name=act_name, se_approx=False, se_rd_fn=int, has_proj_act=False, has_skip=True, drop_path_prob=dp_p, norm_name=norm_name, act_name=act_name, ) setattr(self, f"s{i + 1}", stage) prev_w = w if head: if head.get("width", 0) > 0 and not omit_mult: head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9) self.head = build_head(prev_w, head, norm_name, act_name) self.apply(init_weights) def forward(self, x): x = self.stem(x) for i in range(len(self.depths)): stage = getattr(self, f"s{i + 1}") x = stage(x) if getattr(self, "head", None) is not None: x = self.head(x) return x @staticmethod def get_block_func(name: Union[str, Callable]): """Retrieves the block function by name.""" if callable(name): return name if isinstance(name, str): block_funcs = { "FuseMBConv": FuseMBConv, "MBConv": MBConv, } if name in block_funcs.keys(): return block_funcs[name] raise ValueError(f"Block '{name}' not supported") def _build_effnet(**kwargs): model_args = dict( stem_w=32, block_name=MBConv, depths=[1, 2, 2, 3, 3, 4, 1], widths=[16, 24, 40, 80, 112, 192, 320], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 5, 3, 5, 5, 3], exp_rs=[1, 6, 6, 6, 6, 6, 6], se_rs=0.25, drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2), ) recursive_update(model_args, kwargs) return EffNet(**model_args) def _build_effnet_lite(**kwargs): model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6") recursive_update(model_args, kwargs) return _build_effnet(**model_args) def _build_effnetv2(**kwargs): model_args = dict( stem_w=32, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv], depths=[1, 2, 2, 3, 5, 8], widths=[16, 32, 48, 96, 112, 192], strides=[1, 2, 2, 2, 1, 2], kernels=[3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25], ) recursive_update(model_args, kwargs) return _build_effnet(**model_args) 
@registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl" ) def effnet_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl" )
megengine.hub.pretrained
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020 <NAME> # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """EfficientNet Series EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_ References: https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py """ import math from numbers import Real from typing import Any, Callable, Mapping, Sequence, Union import megengine.hub as hub import megengine.module as M from basecls.layers import ( SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d, ) from basecls.utils import recursive_update, registers from .mbnet import MBConv from .resnet import AnyStage, SimpleStem __all__ = ["FuseMBConv", "EffNet"] class FuseMBConv(M.Module): """Fusing the proj conv1x1 and depthwise conv into a conv2d. Args: w_in: input width. w_out: output width. stride: stride of conv. kernel: kernel of conv. exp_r: expansion ratio. se_r: SE ratio. has_skip: whether apply skip connection. drop_path_prob: drop path probability. norm_name: normalization function. act_name: activation function. """ def __init__( self, w_in: int, w_out: int, stride: int, kernel: int, exp_r: float, se_r: float, has_skip: bool, drop_path_prob: float, norm_name: str, act_name: str, **kwargs, ): super().__init__() # Expansion w_mid = w_in w_exp = int(w_in * exp_r) if exp_r != 1.0: self.exp = conv2d(w_in, w_exp, kernel, stride=stride) self.exp_bn = norm2d(norm_name, w_exp) self.exp_act = activation(act_name) w_mid = w_exp # SE if se_r > 0.0: w_se = int(w_in * se_r) self.se = SE(w_mid, w_se, act_name) # PWConv self.proj = conv2d( w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride ) self.proj_bn = norm2d(norm_name, w_out) self.has_proj_act = exp_r == 1.0 if self.has_proj_act: self.proj_act = activation(act_name) # Skip self.has_skip = has_skip and w_in == w_out and stride == 1 if self.has_skip: self.drop_path = DropPath(drop_path_prob) def forward(self, x): x_p = x if getattr(self, "exp", None) is not None: x = self.exp(x) x = self.exp_bn(x) x = self.exp_act(x) if getattr(self, "se", None) is not None: x = self.se(x) x = self.proj(x) x = self.proj_bn(x) if self.has_proj_act: x = self.proj_act(x) if self.has_skip: x = self.drop_path(x) x = x + x_p return x @registers.models.register() class EffNet(M.Module): """EfficientNet model. Args: stem_w: stem width. block_name: block name. depths: depth for each stage (number of blocks in the stage). widths: width for each stage (width of each block in the stage). strides: strides for each stage (applies to the first block of each stage). kernels: kernel sizes for each stage. exp_rs: expansion ratios for MBConv blocks in each stage. se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.25`` drop_path_prob: drop path probability. Default: ``0.0`` depth_mult: depth multiplier. Default: ``1.0`` width_mult: width multiplier. Default: ``1.0`` omit_mult: omit multiplier for stem width, head width, the first stage depth and the last stage depth, enabled in EfficientNet-Lite. Default: ``False`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. 
Default: ``"silu"`` head: head args. Default: ``None`` """ def __init__( self, stem_w: int, block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]], depths: Sequence[int], widths: Sequence[int], strides: Sequence[int], kernels: Sequence[int], exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0, se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0, drop_path_prob: float = 0.0, depth_mult: float = 1.0, width_mult: float = 1.0, omit_mult: bool = False, norm_name: str = "BN", act_name: str = "silu", head: Mapping[str, Any] = None, ): super().__init__() depths = [ d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult) for i, d in enumerate(depths) ] self.depths = depths stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9) self.stem = SimpleStem(3, stem_w, norm_name, act_name) if isinstance(block_name, (str, Callable)): block_name = [block_name] * len(depths) block_func = [self.get_block_func(bn) for bn in block_name] widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths] if isinstance(exp_rs, Real): exp_rs = [exp_rs] * len(depths) if isinstance(se_rs, Real): se_rs = [se_rs] * len(depths) drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths))) drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths] model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs] prev_w = stem_w for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)): stage = AnyStage( prev_w, w, s, d, bf, kernel=k, exp_r=exp_r, se_r=se_r, se_from_exp=False, se_act_name=act_name, se_approx=False, se_rd_fn=int, has_proj_act=False, has_skip=True, drop_path_prob=dp_p, norm_name=norm_name, act_name=act_name, ) setattr(self, f"s{i + 1}", stage) prev_w = w if head: if head.get("width", 0) > 0 and not omit_mult: head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9) self.head = build_head(prev_w, head, norm_name, act_name) self.apply(init_weights) def forward(self, x): x = self.stem(x) for i in range(len(self.depths)): stage = getattr(self, f"s{i + 1}") x = stage(x) if getattr(self, "head", None) is not None: x = self.head(x) return x @staticmethod def get_block_func(name: Union[str, Callable]): """Retrieves the block function by name.""" if callable(name): return name if isinstance(name, str): block_funcs = { "FuseMBConv": FuseMBConv, "MBConv": MBConv, } if name in block_funcs.keys(): return block_funcs[name] raise ValueError(f"Block '{name}' not supported") def _build_effnet(**kwargs): model_args = dict( stem_w=32, block_name=MBConv, depths=[1, 2, 2, 3, 3, 4, 1], widths=[16, 24, 40, 80, 112, 192, 320], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 5, 3, 5, 5, 3], exp_rs=[1, 6, 6, 6, 6, 6, 6], se_rs=0.25, drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2), ) recursive_update(model_args, kwargs) return EffNet(**model_args) def _build_effnet_lite(**kwargs): model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6") recursive_update(model_args, kwargs) return _build_effnet(**model_args) def _build_effnetv2(**kwargs): model_args = dict( stem_w=32, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv], depths=[1, 2, 2, 3, 5, 8], widths=[16, 32, 48, 96, 112, 192], strides=[1, 2, 2, 2, 1, 2], kernels=[3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25], ) recursive_update(model_args, kwargs) return _build_effnet(**model_args) 
@registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl" ) def effnet_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl" ) def effnet_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl" )
megengine.hub.pretrained
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2020 <NAME>
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""EfficientNet Series

EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks"
<https://arxiv.org/abs/1905.11946>`_

References:
    https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py
    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py
    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py
"""
import math
from numbers import Real
from typing import Any, Callable, Mapping, Sequence, Union

import megengine.hub as hub
import megengine.module as M

from basecls.layers import (
    SE,
    DropPath,
    activation,
    build_head,
    conv2d,
    init_weights,
    make_divisible,
    norm2d,
)
from basecls.utils import recursive_update, registers

from .mbnet import MBConv
from .resnet import AnyStage, SimpleStem

__all__ = ["FuseMBConv", "EffNet"]


class FuseMBConv(M.Module):
    """Fusing the proj conv1x1 and depthwise conv into a conv2d.

    Args:
        w_in: input width.
        w_out: output width.
        stride: stride of conv.
        kernel: kernel of conv.
        exp_r: expansion ratio.
        se_r: SE ratio.
        has_skip: whether to apply the skip connection.
        drop_path_prob: drop path probability.
        norm_name: normalization function.
        act_name: activation function.
    """

    def __init__(
        self,
        w_in: int,
        w_out: int,
        stride: int,
        kernel: int,
        exp_r: float,
        se_r: float,
        has_skip: bool,
        drop_path_prob: float,
        norm_name: str,
        act_name: str,
        **kwargs,
    ):
        super().__init__()
        # Expansion
        w_mid = w_in
        w_exp = int(w_in * exp_r)
        if exp_r != 1.0:
            self.exp = conv2d(w_in, w_exp, kernel, stride=stride)
            self.exp_bn = norm2d(norm_name, w_exp)
            self.exp_act = activation(act_name)
            w_mid = w_exp
        # SE
        if se_r > 0.0:
            w_se = int(w_in * se_r)
            self.se = SE(w_mid, w_se, act_name)
        # PWConv
        self.proj = conv2d(
            w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride
        )
        self.proj_bn = norm2d(norm_name, w_out)
        self.has_proj_act = exp_r == 1.0
        if self.has_proj_act:
            self.proj_act = activation(act_name)
        # Skip
        self.has_skip = has_skip and w_in == w_out and stride == 1
        if self.has_skip:
            self.drop_path = DropPath(drop_path_prob)

    def forward(self, x):
        x_p = x
        if getattr(self, "exp", None) is not None:
            x = self.exp(x)
            x = self.exp_bn(x)
            x = self.exp_act(x)
        if getattr(self, "se", None) is not None:
            x = self.se(x)
        x = self.proj(x)
        x = self.proj_bn(x)
        if self.has_proj_act:
            x = self.proj_act(x)
        if self.has_skip:
            x = self.drop_path(x)
            x = x + x_p
        return x


@registers.models.register()
class EffNet(M.Module):
    """EfficientNet model.

    Args:
        stem_w: stem width.
        block_name: block name or callable, or a sequence of them (one per stage).
        depths: depth for each stage (number of blocks in the stage).
        widths: width for each stage (width of each block in the stage).
        strides: strides for each stage (applies to the first block of each stage).
        kernels: kernel sizes for each stage.
        exp_rs: expansion ratios for MBConv blocks in each stage. Default: ``1.0``
        se_rs: Squeeze-and-Excitation (SE) ratios for each stage. Default: ``0.0``
        drop_path_prob: drop path probability. Default: ``0.0``
        depth_mult: depth multiplier. Default: ``1.0``
        width_mult: width multiplier. Default: ``1.0``
        omit_mult: omit multiplier for stem width, head width, the first stage depth and
            the last stage depth, enabled in EfficientNet-Lite. Default: ``False``
        norm_name: normalization function. Default: ``"BN"``
        act_name: activation function. Default: ``"silu"``
        head: head args. Default: ``None``
    """

    def __init__(
        self,
        stem_w: int,
        block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]],
        depths: Sequence[int],
        widths: Sequence[int],
        strides: Sequence[int],
        kernels: Sequence[int],
        exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0,
        se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0,
        drop_path_prob: float = 0.0,
        depth_mult: float = 1.0,
        width_mult: float = 1.0,
        omit_mult: bool = False,
        norm_name: str = "BN",
        act_name: str = "silu",
        head: Mapping[str, Any] = None,
    ):
        super().__init__()
        depths = [
            d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult)
            for i, d in enumerate(depths)
        ]
        self.depths = depths

        stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9)
        self.stem = SimpleStem(3, stem_w, norm_name, act_name)

        if isinstance(block_name, (str, Callable)):
            block_name = [block_name] * len(depths)
        block_func = [self.get_block_func(bn) for bn in block_name]
        widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths]
        if isinstance(exp_rs, Real):
            exp_rs = [exp_rs] * len(depths)
        if isinstance(se_rs, Real):
            se_rs = [se_rs] * len(depths)
        drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths)))
        drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths]
        model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs]

        prev_w = stem_w
        for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)):
            stage = AnyStage(
                prev_w,
                w,
                s,
                d,
                bf,
                kernel=k,
                exp_r=exp_r,
                se_r=se_r,
                se_from_exp=False,
                se_act_name=act_name,
                se_approx=False,
                se_rd_fn=int,
                has_proj_act=False,
                has_skip=True,
                drop_path_prob=dp_p,
                norm_name=norm_name,
                act_name=act_name,
            )
            setattr(self, f"s{i + 1}", stage)
            prev_w = w

        if head:
            if head.get("width", 0) > 0 and not omit_mult:
                head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9)
            self.head = build_head(prev_w, head, norm_name, act_name)

        self.apply(init_weights)

    def forward(self, x):
        x = self.stem(x)
        for i in range(len(self.depths)):
            stage = getattr(self, f"s{i + 1}")
            x = stage(x)
        if getattr(self, "head", None) is not None:
            x = self.head(x)
        return x

    @staticmethod
    def get_block_func(name: Union[str, Callable]):
        """Retrieves the block function by name."""
        if callable(name):
            return name
        if isinstance(name, str):
            block_funcs = {
                "FuseMBConv": FuseMBConv,
                "MBConv": MBConv,
            }
            if name in block_funcs.keys():
                return block_funcs[name]
        raise ValueError(f"Block '{name}' not supported")


def _build_effnet(**kwargs):
    model_args = dict(
        stem_w=32,
        block_name=MBConv,
        depths=[1, 2, 2, 3, 3, 4, 1],
        widths=[16, 24, 40, 80, 112, 192, 320],
        strides=[1, 2, 2, 2, 1, 2, 1],
        kernels=[3, 3, 5, 3, 5, 5, 3],
        exp_rs=[1, 6, 6, 6, 6, 6, 6],
        se_rs=0.25,
        drop_path_prob=0.2,
        head=dict(name="ClsHead", width=1280, dropout_prob=0.2),
    )
    recursive_update(model_args, kwargs)
    return EffNet(**model_args)


def _build_effnet_lite(**kwargs):
    model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6")
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


def _build_effnetv2(**kwargs):
    model_args = dict(
        stem_w=32,
        block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv],
        depths=[1, 2, 2, 3, 5, 8],
        widths=[16, 32, 48, 96, 112, 192],
        strides=[1, 2, 2, 2, 1, 2],
        kernels=[3, 3, 3, 3, 3, 3],
        exp_rs=[1, 4, 4, 4, 6, 6],
        se_rs=[0, 0, 0, 0.25, 0.25, 0.25],
    )
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl"
)
def effnet_b0(**kwargs):
    model_args = dict(depth_mult=1.0, width_mult=1.0)
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl"
)
def effnet_b1(**kwargs):
    model_args = dict(depth_mult=1.1, width_mult=1.0)
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl"
)
def effnet_b2(**kwargs):
    model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl" )
megengine.hub.pretrained
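The api column above names megengine.hub.pretrained, the call each completion supplies for the dangling @ in its prompt. As a rough sketch of what the completed decorator does once applied (hedged: the pretrained=True loading behavior is taken from MegEngine's documented hub API, and effnet_b3_demo with its M.Linear body is a stand-in, not basecls code):

import megengine.hub as hub
import megengine.module as M


@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl"
)
def effnet_b3_demo():
    # stand-in for the real basecls builder
    return M.Linear(8, 8)


# With pretrained=True the decorator downloads the .pkl above and loads the
# state dict into the module returned by the builder; with the default
# pretrained=False it simply builds and returns the module, as here.
net = effnet_b3_demo()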
def effnet_b3(**kwargs):
    model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl" )
megengine.hub.pretrained
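From effnet_b0 through effnet_l2, the rows differ only in the (depth_mult, width_mult, head dropout) triple; EffNet.__init__ expands the two multipliers into concrete per-stage depths and widths. A minimal sketch of that arithmetic, assuming a conventional divisor-of-8 make_divisible (the exact basecls.layers.make_divisible may round differently):

import math


def make_divisible(v, divisor=8, round_limit=0.9):
    # hypothetical stand-in for basecls.layers.make_divisible: round to the
    # nearest multiple of `divisor`, never dropping below `round_limit * v`.
    new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
    if new_v < round_limit * v:
        new_v += divisor
    return new_v


depths = [1, 2, 2, 3, 3, 4, 1]            # B0 stage depths from _build_effnet
widths = [16, 24, 40, 80, 112, 192, 320]  # B0 stage widths from _build_effnet
depth_mult, width_mult = 1.8, 1.4         # the effnet_b4 multipliers above

print([math.ceil(d * depth_mult) for d in depths])  # [2, 4, 4, 6, 6, 8, 2]
print([make_divisible(w * width_mult, round_limit=0.9) for w in widths])
# widths snap to multiples of 8, e.g. 16 * 1.4 = 22.4 -> 24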
def effnet_b4(**kwargs):
    model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.4))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl" )
megengine.hub.pretrained
def effnet_b5(**kwargs):
    model_args = dict(depth_mult=2.2, width_mult=1.6, head=dict(dropout_prob=0.4))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl" )
megengine.hub.pretrained
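One detail worth pulling out of the prompt: EffNet.__init__ draws per-block drop-path rates from the generator i / sum(depths) * drop_path_prob and slices it stage by stage, so stochastic depth ramps linearly from 0 at the first block up toward drop_path_prob at the last. Reproducing just that slicing:

depths = [1, 2, 2, 3, 3, 4, 1]  # per-stage block counts (B0 layout)
drop_path_prob = 0.2            # the default _build_effnet passes

total = sum(depths)
rates = (i / total * drop_path_prob for i in range(total))
per_stage = [[next(rates) for _ in range(d)] for d in depths]

for stage, probs in enumerate(per_stage, 1):
    print(f"s{stage}: {[round(p, 4) for p in probs]}")
# s1 starts at 0.0; the final block of s7 gets (15 / 16) * 0.2 = 0.1875,
# approaching but never reaching drop_path_prob itself.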
def effnet_b6(**kwargs):
    model_args = dict(depth_mult=2.6, width_mult=1.8, head=dict(dropout_prob=0.5))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl" )
megengine.hub.pretrained
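The FuseMBConv block at the top of every prompt is the EfficientNetV2-style fused block, and its branch structure is easy to misread in flattened form: with exp_r != 1 the fused conv (self.exp) carries the kernel and stride while self.proj is a plain 1x1, whereas with exp_r == 1 the projection conv itself takes the kernel and stride and gains the activation. A small helper (hypothetical, written only to restate that routing):

def fuse_mbconv_layout(exp_r: float, kernel: int, stride: int) -> dict:
    # which conv carries the spatial kernel/stride in FuseMBConv
    if exp_r != 1.0:
        return {"exp": (kernel, stride), "proj": (1, 1), "proj_act": False}
    return {"exp": None, "proj": (kernel, stride), "proj_act": True}


print(fuse_mbconv_layout(4.0, 3, 2))  # expanded: 3x3/s2 fused conv, then 1x1 proj
print(fuse_mbconv_layout(1.0, 3, 2))  # exp_r == 1: proj is the 3x3/s2 conv, with activation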
def effnet_b7(**kwargs):
    model_args = dict(depth_mult=3.1, width_mult=2.0, head=dict(dropout_prob=0.5))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl" )
megengine.hub.pretrained
def effnet_b8(**kwargs):
    model_args = dict(depth_mult=3.6, width_mult=2.2, head=dict(dropout_prob=0.5))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl" )
megengine.hub.pretrained
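Finally, every builder above funnels caller kwargs through recursive_update(model_args, kwargs), which is what lets a variant like effnet_b2 override only head=dict(dropout_prob=0.3) while keeping the default head name and width. A plausible reading of that helper (the real basecls.utils.recursive_update may differ; this is the usual nested-dict merge):

def recursive_update(d: dict, u: dict) -> dict:
    # hypothetical stand-in for basecls.utils.recursive_update: merge `u`
    # into `d`, descending into nested dicts instead of replacing them.
    for k, v in u.items():
        if isinstance(v, dict) and isinstance(d.get(k), dict):
            recursive_update(d[k], v)
        else:
            d[k] = v
    return d


defaults = dict(depth_mult=1.0, head=dict(name="ClsHead", width=1280, dropout_prob=0.2))
recursive_update(defaults, dict(head=dict(dropout_prob=0.3)))
print(defaults["head"])  # {'name': 'ClsHead', 'width': 1280, 'dropout_prob': 0.3}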
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2020 <NAME>
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""EfficientNet Series

EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks"
<https://arxiv.org/abs/1905.11946>`_

References:
    https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py
    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py
    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py
"""
import math
from numbers import Real
from typing import Any, Callable, Mapping, Sequence, Union

import megengine.hub as hub
import megengine.module as M

from basecls.layers import (
    SE,
    DropPath,
    activation,
    build_head,
    conv2d,
    init_weights,
    make_divisible,
    norm2d,
)
from basecls.utils import recursive_update, registers

from .mbnet import MBConv
from .resnet import AnyStage, SimpleStem

__all__ = ["FuseMBConv", "EffNet"]


class FuseMBConv(M.Module):
    """Fuses the expansion conv1x1 and depthwise conv into a single conv2d.

    Args:
        w_in: input width.
        w_out: output width.
        stride: stride of conv.
        kernel: kernel of conv.
        exp_r: expansion ratio.
        se_r: SE ratio.
        has_skip: whether to apply a skip connection.
        drop_path_prob: drop path probability.
        norm_name: normalization function.
        act_name: activation function.
    """

    def __init__(
        self,
        w_in: int,
        w_out: int,
        stride: int,
        kernel: int,
        exp_r: float,
        se_r: float,
        has_skip: bool,
        drop_path_prob: float,
        norm_name: str,
        act_name: str,
        **kwargs,
    ):
        super().__init__()
        # Expansion: one full-kernel strided conv replaces the usual conv1x1 + depthwise pair
        w_mid = w_in
        w_exp = int(w_in * exp_r)
        if exp_r != 1.0:
            self.exp = conv2d(w_in, w_exp, kernel, stride=stride)
            self.exp_bn = norm2d(norm_name, w_exp)
            self.exp_act = activation(act_name)
            w_mid = w_exp
        # SE
        if se_r > 0.0:
            w_se = int(w_in * se_r)
            self.se = SE(w_mid, w_se, act_name)
        # PWConv: with no expansion branch, the projection carries the kernel and stride instead
        self.proj = conv2d(
            w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride
        )
        self.proj_bn = norm2d(norm_name, w_out)
        self.has_proj_act = exp_r == 1.0
        if self.has_proj_act:
            self.proj_act = activation(act_name)
        # Skip
        self.has_skip = has_skip and w_in == w_out and stride == 1
        if self.has_skip:
            self.drop_path = DropPath(drop_path_prob)

    def forward(self, x):
        x_p = x
        if getattr(self, "exp", None) is not None:
            x = self.exp(x)
            x = self.exp_bn(x)
            x = self.exp_act(x)
        if getattr(self, "se", None) is not None:
            x = self.se(x)
        x = self.proj(x)
        x = self.proj_bn(x)
        if self.has_proj_act:
            x = self.proj_act(x)
        if self.has_skip:
            x = self.drop_path(x)
            x = x + x_p
        return x


@registers.models.register()
class EffNet(M.Module):
    """EfficientNet model.

    Args:
        stem_w: stem width.
        block_name: block name.
        depths: depth for each stage (number of blocks in the stage).
        widths: width for each stage (width of each block in the stage).
        strides: strides for each stage (applies to the first block of each stage).
        kernels: kernel sizes for each stage.
        exp_rs: expansion ratios for MBConv blocks in each stage. Default: ``1.0``
        se_rs: Squeeze-and-Excitation (SE) ratios for each stage. Default: ``0.0``
        drop_path_prob: drop path probability. Default: ``0.0``
        depth_mult: depth multiplier. Default: ``1.0``
        width_mult: width multiplier. Default: ``1.0``
        omit_mult: omit multiplier for stem width, head width, the first stage depth and
            the last stage depth, enabled in EfficientNet-Lite. Default: ``False``
        norm_name: normalization function. Default: ``"BN"``
        act_name: activation function. Default: ``"silu"``
        head: head args. Default: ``None``
    """

    def __init__(
        self,
        stem_w: int,
        block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]],
        depths: Sequence[int],
        widths: Sequence[int],
        strides: Sequence[int],
        kernels: Sequence[int],
        exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0,
        se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0,
        drop_path_prob: float = 0.0,
        depth_mult: float = 1.0,
        width_mult: float = 1.0,
        omit_mult: bool = False,
        norm_name: str = "BN",
        act_name: str = "silu",
        head: Mapping[str, Any] = None,
    ):
        super().__init__()
        depths = [
            d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult)
            for i, d in enumerate(depths)
        ]
        self.depths = depths

        stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9)
        self.stem = SimpleStem(3, stem_w, norm_name, act_name)

        if isinstance(block_name, (str, Callable)):
            block_name = [block_name] * len(depths)
        block_func = [self.get_block_func(bn) for bn in block_name]
        widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths]
        if isinstance(exp_rs, Real):
            exp_rs = [exp_rs] * len(depths)
        if isinstance(se_rs, Real):
            se_rs = [se_rs] * len(depths)
        # Drop path probability ramps up linearly with block index across the whole network
        drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths)))
        drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths]
        model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs]

        prev_w = stem_w
        for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)):
            stage = AnyStage(
                prev_w,
                w,
                s,
                d,
                bf,
                kernel=k,
                exp_r=exp_r,
                se_r=se_r,
                se_from_exp=False,
                se_act_name=act_name,
                se_approx=False,
                se_rd_fn=int,
                has_proj_act=False,
                has_skip=True,
                drop_path_prob=dp_p,
                norm_name=norm_name,
                act_name=act_name,
            )
            setattr(self, f"s{i + 1}", stage)
            prev_w = w

        if head:
            if head.get("width", 0) > 0 and not omit_mult:
                head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9)
            self.head = build_head(prev_w, head, norm_name, act_name)

        self.apply(init_weights)

    def forward(self, x):
        x = self.stem(x)
        for i in range(len(self.depths)):
            stage = getattr(self, f"s{i + 1}")
            x = stage(x)
        if getattr(self, "head", None) is not None:
            x = self.head(x)
        return x

    @staticmethod
    def get_block_func(name: Union[str, Callable]):
        """Retrieves the block function by name."""
        if callable(name):
            return name
        if isinstance(name, str):
            block_funcs = {
                "FuseMBConv": FuseMBConv,
                "MBConv": MBConv,
            }
            if name in block_funcs:
                return block_funcs[name]
        raise ValueError(f"Block '{name}' not supported")


def _build_effnet(**kwargs):
    model_args = dict(
        stem_w=32,
        block_name=MBConv,
        depths=[1, 2, 2, 3, 3, 4, 1],
        widths=[16, 24, 40, 80, 112, 192, 320],
        strides=[1, 2, 2, 2, 1, 2, 1],
        kernels=[3, 3, 5, 3, 5, 5, 3],
        exp_rs=[1, 6, 6, 6, 6, 6, 6],
        se_rs=0.25,
        drop_path_prob=0.2,
        head=dict(name="ClsHead", width=1280, dropout_prob=0.2),
    )
    recursive_update(model_args, kwargs)
    return EffNet(**model_args)


def _build_effnet_lite(**kwargs):
    model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6")
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


def _build_effnetv2(**kwargs):
    model_args = dict(
        stem_w=32,
        block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv],
        depths=[1, 2, 2, 3, 5, 8],
        widths=[16, 32, 48, 96, 112, 192],
        strides=[1, 2, 2, 2, 1, 2],
        kernels=[3, 3, 3, 3, 3, 3],
        exp_rs=[1, 4, 4, 4, 6, 6],
        se_rs=[0, 0, 0, 0.25, 0.25, 0.25],
    )
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl"
)
def effnet_b0(**kwargs):
    model_args = dict(depth_mult=1.0, width_mult=1.0)
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl"
)
def effnet_b1(**kwargs):
    model_args = dict(depth_mult=1.1, width_mult=1.0)
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl"
)
def effnet_b2(**kwargs):
    model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl"
)
def effnet_b3(**kwargs):
    model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl"
)
def effnet_b4(**kwargs):
    model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.4))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl"
)
def effnet_b5(**kwargs):
    model_args = dict(depth_mult=2.2, width_mult=1.6, head=dict(dropout_prob=0.4))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl"
)
def effnet_b6(**kwargs):
    model_args = dict(depth_mult=2.6, width_mult=1.8, head=dict(dropout_prob=0.5))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl"
)
def effnet_b7(**kwargs):
    model_args = dict(depth_mult=3.1, width_mult=2.0, head=dict(dropout_prob=0.5))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl"
)
def effnet_b8(**kwargs):
    model_args = dict(depth_mult=3.6, width_mult=2.2, head=dict(dropout_prob=0.5))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl"
)
def effnet_l2(**kwargs):
    model_args = dict(depth_mult=5.3, width_mult=4.3, head=dict(dropout_prob=0.5))
    recursive_update(model_args, kwargs)
    return _build_effnet(**model_args)


@registers.models.register()
@
hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl"
)
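For context, a minimal usage sketch of these registered factories. The import path `basecls.models` and the `pretrained` flag are assumptions: the flag follows `megengine.hub.pretrained`'s usual convention of letting the decorator intercept a weight-loading switch rather than forwarding it to the factory kwargs.

import numpy as np

import megengine as mge
from basecls.models import effnet_b0  # import path assumed

model = effnet_b0()  # add pretrained=True to pull the weights declared above
                     # (flag name assumed from megengine.hub's convention)
model.eval()

x = mge.tensor(np.zeros((1, 3, 224, 224), dtype="float32"))
logits = model(x)
print(logits.shape)  # (1, num_classes) from the ClsHead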
megengine.hub.pretrained
@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl"
)
def effnet_b0_lite(**kwargs):
    model_args = dict(depth_mult=1.0, width_mult=1.0)
    recursive_update(model_args, kwargs)
    return _build_effnet_lite(**model_args)


@registers.models.register()
@
hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl"
)
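The per-block stochastic-depth schedule built in `EffNet.__init__` is worth seeing with numbers; this reproduces that generator expression verbatim on a toy config:

depths = [1, 2, 2]      # toy stage depths
drop_path_prob = 0.2    # network-wide maximum

it = (i / sum(depths) * drop_path_prob for i in range(sum(depths)))
probs = [[next(it) for _ in range(d)] for d in depths]
print(probs)  # [[0.0], [0.04, 0.08], [0.12, 0.16]] (up to float rounding):
              # a linear ramp over all 5 blocks, grouped back into stages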
megengine.hub.pretrained
@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl"
)
def effnet_b1_lite(**kwargs):
    model_args = dict(depth_mult=1.1, width_mult=1.0)
    recursive_update(model_args, kwargs)
    return _build_effnet_lite(**model_args)


@registers.models.register()
@
hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl"
)
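To make the multiplier mechanics concrete, here is a sketch of how `depth_mult` and `width_mult` reshape the base B0 configuration. The `math.ceil` depth rule is copied from `EffNet.__init__`; the rounding behavior of `make_divisible` shown below (nearest multiple of 8, never below 0.9x of the target) is an assumption about the basecls helper, modeled on the common timm/pycls implementation.

import math


def make_divisible_sketch(v, divisor=8, round_limit=0.9):
    # Assumed semantics: round to the nearest multiple of `divisor`,
    # bumping up one step if that would fall below round_limit * v.
    new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
    if new_v < round_limit * v:
        new_v += divisor
    return new_v


depths = [1, 2, 2, 3, 3, 4, 1]            # base B0 depths
widths = [16, 24, 40, 80, 112, 192, 320]  # base B0 widths
depth_mult, width_mult = 1.2, 1.1         # the B2 multipliers above

print([math.ceil(d * depth_mult) for d in depths])
# [2, 3, 3, 4, 4, 5, 2]
print([make_divisible_sketch(w * width_mult) for w in widths])
# [16, 24, 48, 88, 120, 208, 352] under this assumed rounding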
megengine.hub.pretrained
@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl"
)
def effnet_b2_lite(**kwargs):
    model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3))
    recursive_update(model_args, kwargs)
    return _build_effnet_lite(**model_args)


@registers.models.register()
@
hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl"
)
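Since `get_block_func` is the dispatch hook these factories rely on, a quick sketch of its behavior; the import paths `basecls.models.effnet` and `basecls.models.mbnet` are assumptions based on the relative imports in the file above.

from basecls.models.effnet import EffNet, FuseMBConv  # import path assumed
from basecls.models.mbnet import MBConv               # import path assumed

# A registered name resolves to its class; a callable passes through untouched.
assert EffNet.get_block_func("FuseMBConv") is FuseMBConv
assert EffNet.get_block_func(MBConv) is MBConv

# Unknown names raise, which surfaces config typos early.
try:
    EffNet.get_block_func("Bottleneck")
except ValueError as err:
    print(err)  # Block 'Bottleneck' not supported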
megengine.hub.pretrained
@registers.models.register()
@hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl"
)
def effnet_b3_lite(**kwargs):
    model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3))
    recursive_update(model_args, kwargs)
    return _build_effnet_lite(**model_args)


@registers.models.register()
@
hub.pretrained(
    "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl"
)
megengine.hub.pretrained
dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl" ) def effnet_b2_lite(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl" ) def effnet_b3_lite(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl" ) def effnet_b4_lite(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl" )
megengine.hub.pretrained
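
The builder variants above differ only in their depth_mult/width_mult pair and the head dropout. A minimal standalone sketch of the depth-scaling rule from EffNet.__init__ (scale_depths is an illustrative name, not a basecls helper):

import math

def scale_depths(depths, depth_mult, omit_mult=False):
    # Mirrors the list comprehension in EffNet.__init__: scale each stage
    # depth and round up, leaving the first and last stages untouched when
    # omit_mult is set (the EfficientNet-Lite convention).
    return [
        d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult)
        for i, d in enumerate(depths)
    ]

# effnet_b4 applies depth_mult=1.8 to the B0 stage depths:
print(scale_depths([1, 2, 2, 3, 3, 4, 1], 1.8))        # [2, 4, 4, 6, 6, 8, 2]
print(scale_depths([1, 2, 2, 3, 3, 4, 1], 1.8, True))  # [1, 4, 4, 6, 6, 8, 1]
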
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020 <NAME> # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """EfficientNet Series EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_ References: https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py """ import math from numbers import Real from typing import Any, Callable, Mapping, Sequence, Union import megengine.hub as hub import megengine.module as M from basecls.layers import ( SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d, ) from basecls.utils import recursive_update, registers from .mbnet import MBConv from .resnet import AnyStage, SimpleStem __all__ = ["FuseMBConv", "EffNet"] class FuseMBConv(M.Module): """Fuses the expansion conv1x1 and the depthwise conv into a single conv2d. Args: w_in: input width. w_out: output width. stride: stride of conv. kernel: kernel of conv. exp_r: expansion ratio. se_r: SE ratio. has_skip: whether to apply the skip connection. drop_path_prob: drop path probability. norm_name: normalization function. act_name: activation function. """ def __init__( self, w_in: int, w_out: int, stride: int, kernel: int, exp_r: float, se_r: float, has_skip: bool, drop_path_prob: float, norm_name: str, act_name: str, **kwargs, ): super().__init__() # Expansion w_mid = w_in w_exp = int(w_in * exp_r) if exp_r != 1.0: self.exp = conv2d(w_in, w_exp, kernel, stride=stride) self.exp_bn = norm2d(norm_name, w_exp) self.exp_act = activation(act_name) w_mid = w_exp # SE if se_r > 0.0: w_se = int(w_in * se_r) self.se = SE(w_mid, w_se, act_name) # PWConv self.proj = conv2d( w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride ) self.proj_bn = norm2d(norm_name, w_out) self.has_proj_act = exp_r == 1.0 if self.has_proj_act: self.proj_act = activation(act_name) # Skip self.has_skip = has_skip and w_in == w_out and stride == 1 if self.has_skip: self.drop_path = DropPath(drop_path_prob) def forward(self, x): x_p = x if getattr(self, "exp", None) is not None: x = self.exp(x) x = self.exp_bn(x) x = self.exp_act(x) if getattr(self, "se", None) is not None: x = self.se(x) x = self.proj(x) x = self.proj_bn(x) if self.has_proj_act: x = self.proj_act(x) if self.has_skip: x = self.drop_path(x) x = x + x_p return x @registers.models.register() class EffNet(M.Module): """EfficientNet model. Args: stem_w: stem width. block_name: block name. depths: depth for each stage (number of blocks in the stage). widths: width for each stage (width of each block in the stage). strides: strides for each stage (applies to the first block of each stage). kernels: kernel sizes for each stage. exp_rs: expansion ratios for MBConv blocks in each stage. Default: ``1.0`` se_rs: Squeeze-and-Excitation (SE) ratio for each stage. Default: ``0.0`` drop_path_prob: drop path probability. Default: ``0.0`` depth_mult: depth multiplier. Default: ``1.0`` width_mult: width multiplier. Default: ``1.0`` omit_mult: whether to omit the multipliers for stem width, head width, and the first and last stage depths, as in EfficientNet-Lite. Default: ``False`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. 
Default: ``"silu"`` head: head args. Default: ``None`` """ def __init__( self, stem_w: int, block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]], depths: Sequence[int], widths: Sequence[int], strides: Sequence[int], kernels: Sequence[int], exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0, se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0, drop_path_prob: float = 0.0, depth_mult: float = 1.0, width_mult: float = 1.0, omit_mult: bool = False, norm_name: str = "BN", act_name: str = "silu", head: Mapping[str, Any] = None, ): super().__init__() depths = [ d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult) for i, d in enumerate(depths) ] self.depths = depths stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9) self.stem = SimpleStem(3, stem_w, norm_name, act_name) if isinstance(block_name, (str, Callable)): block_name = [block_name] * len(depths) block_func = [self.get_block_func(bn) for bn in block_name] widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths] if isinstance(exp_rs, Real): exp_rs = [exp_rs] * len(depths) if isinstance(se_rs, Real): se_rs = [se_rs] * len(depths) drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths))) drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths] model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs] prev_w = stem_w for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)): stage = AnyStage( prev_w, w, s, d, bf, kernel=k, exp_r=exp_r, se_r=se_r, se_from_exp=False, se_act_name=act_name, se_approx=False, se_rd_fn=int, has_proj_act=False, has_skip=True, drop_path_prob=dp_p, norm_name=norm_name, act_name=act_name, ) setattr(self, f"s{i + 1}", stage) prev_w = w if head: if head.get("width", 0) > 0 and not omit_mult: head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9) self.head = build_head(prev_w, head, norm_name, act_name) self.apply(init_weights) def forward(self, x): x = self.stem(x) for i in range(len(self.depths)): stage = getattr(self, f"s{i + 1}") x = stage(x) if getattr(self, "head", None) is not None: x = self.head(x) return x @staticmethod def get_block_func(name: Union[str, Callable]): """Retrieves the block function by name.""" if callable(name): return name if isinstance(name, str): block_funcs = { "FuseMBConv": FuseMBConv, "MBConv": MBConv, } if name in block_funcs.keys(): return block_funcs[name] raise ValueError(f"Block '{name}' not supported") def _build_effnet(**kwargs): model_args = dict( stem_w=32, block_name=MBConv, depths=[1, 2, 2, 3, 3, 4, 1], widths=[16, 24, 40, 80, 112, 192, 320], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 5, 3, 5, 5, 3], exp_rs=[1, 6, 6, 6, 6, 6, 6], se_rs=0.25, drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2), ) recursive_update(model_args, kwargs) return EffNet(**model_args) def _build_effnet_lite(**kwargs): model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6") recursive_update(model_args, kwargs) return _build_effnet(**model_args) def _build_effnetv2(**kwargs): model_args = dict( stem_w=32, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv], depths=[1, 2, 2, 3, 5, 8], widths=[16, 32, 48, 96, 112, 192], strides=[1, 2, 2, 2, 1, 2], kernels=[3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25], ) recursive_update(model_args, kwargs) return _build_effnet(**model_args) 
@registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl" ) def effnet_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl" ) def effnet_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl" ) def effnet_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl" ) def effnet_b3(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl" ) def effnet_b4(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl" ) def effnet_b5(**kwargs): model_args = dict(depth_mult=2.2, width_mult=1.6, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl" ) def effnet_b6(**kwargs): model_args = dict(depth_mult=2.6, width_mult=1.8, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl" ) def effnet_b7(**kwargs): model_args = dict(depth_mult=3.1, width_mult=2.0, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl" ) def effnet_b8(**kwargs): model_args = dict(depth_mult=3.6, width_mult=2.2, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl" ) def effnet_l2(**kwargs): model_args = dict(depth_mult=5.3, width_mult=4.3, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl" ) def effnet_b0_lite(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl" ) def effnet_b1_lite(**kwargs): model_args = 
dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl" ) def effnet_b2_lite(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl" ) def effnet_b3_lite(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl" ) def effnet_b4_lite(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl" ) def effnetv2_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b1/effnetv2_b1.pkl" )
megengine.hub.pretrained
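
Stochastic depth in EffNet.__init__ is scheduled per block across the whole network, not per stage: block i of sum(depths) blocks gets rate i / sum(depths) * drop_path_prob. A self-contained sketch of that generator logic (drop_path_schedule is an illustrative name):

def drop_path_schedule(depths, drop_path_prob):
    # Mirrors drop_path_prob_iter in EffNet.__init__: rates grow linearly
    # with the global block index and are grouped back into stages.
    total = sum(depths)
    rates = (i / total * drop_path_prob for i in range(total))
    return [[next(rates) for _ in range(d)] for d in depths]

print(drop_path_schedule([1, 2, 2], 0.2))
# [[0.0], [0.04, 0.08], [0.12, 0.16]] (up to float rounding)

The first block always gets rate 0.0, and no block ever reaches the full drop_path_prob.
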
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020 <NAME> # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """EfficientNet Series EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_ References: https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py """ import math from numbers import Real from typing import Any, Callable, Mapping, Sequence, Union import megengine.hub as hub import megengine.module as M from basecls.layers import ( SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d, ) from basecls.utils import recursive_update, registers from .mbnet import MBConv from .resnet import AnyStage, SimpleStem __all__ = ["FuseMBConv", "EffNet"] class FuseMBConv(M.Module): """Fuses the expansion conv1x1 and the depthwise conv into a single conv2d. Args: w_in: input width. w_out: output width. stride: stride of conv. kernel: kernel of conv. exp_r: expansion ratio. se_r: SE ratio. has_skip: whether to apply the skip connection. drop_path_prob: drop path probability. norm_name: normalization function. act_name: activation function. """ def __init__( self, w_in: int, w_out: int, stride: int, kernel: int, exp_r: float, se_r: float, has_skip: bool, drop_path_prob: float, norm_name: str, act_name: str, **kwargs, ): super().__init__() # Expansion w_mid = w_in w_exp = int(w_in * exp_r) if exp_r != 1.0: self.exp = conv2d(w_in, w_exp, kernel, stride=stride) self.exp_bn = norm2d(norm_name, w_exp) self.exp_act = activation(act_name) w_mid = w_exp # SE if se_r > 0.0: w_se = int(w_in * se_r) self.se = SE(w_mid, w_se, act_name) # PWConv self.proj = conv2d( w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride ) self.proj_bn = norm2d(norm_name, w_out) self.has_proj_act = exp_r == 1.0 if self.has_proj_act: self.proj_act = activation(act_name) # Skip self.has_skip = has_skip and w_in == w_out and stride == 1 if self.has_skip: self.drop_path = DropPath(drop_path_prob) def forward(self, x): x_p = x if getattr(self, "exp", None) is not None: x = self.exp(x) x = self.exp_bn(x) x = self.exp_act(x) if getattr(self, "se", None) is not None: x = self.se(x) x = self.proj(x) x = self.proj_bn(x) if self.has_proj_act: x = self.proj_act(x) if self.has_skip: x = self.drop_path(x) x = x + x_p return x @registers.models.register() class EffNet(M.Module): """EfficientNet model. Args: stem_w: stem width. block_name: block name. depths: depth for each stage (number of blocks in the stage). widths: width for each stage (width of each block in the stage). strides: strides for each stage (applies to the first block of each stage). kernels: kernel sizes for each stage. exp_rs: expansion ratios for MBConv blocks in each stage. Default: ``1.0`` se_rs: Squeeze-and-Excitation (SE) ratio for each stage. Default: ``0.0`` drop_path_prob: drop path probability. Default: ``0.0`` depth_mult: depth multiplier. Default: ``1.0`` width_mult: width multiplier. Default: ``1.0`` omit_mult: whether to omit the multipliers for stem width, head width, and the first and last stage depths, as in EfficientNet-Lite. Default: ``False`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. 
Default: ``"silu"`` head: head args. Default: ``None`` """ def __init__( self, stem_w: int, block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]], depths: Sequence[int], widths: Sequence[int], strides: Sequence[int], kernels: Sequence[int], exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0, se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0, drop_path_prob: float = 0.0, depth_mult: float = 1.0, width_mult: float = 1.0, omit_mult: bool = False, norm_name: str = "BN", act_name: str = "silu", head: Mapping[str, Any] = None, ): super().__init__() depths = [ d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult) for i, d in enumerate(depths) ] self.depths = depths stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9) self.stem = SimpleStem(3, stem_w, norm_name, act_name) if isinstance(block_name, (str, Callable)): block_name = [block_name] * len(depths) block_func = [self.get_block_func(bn) for bn in block_name] widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths] if isinstance(exp_rs, Real): exp_rs = [exp_rs] * len(depths) if isinstance(se_rs, Real): se_rs = [se_rs] * len(depths) drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths))) drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths] model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs] prev_w = stem_w for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)): stage = AnyStage( prev_w, w, s, d, bf, kernel=k, exp_r=exp_r, se_r=se_r, se_from_exp=False, se_act_name=act_name, se_approx=False, se_rd_fn=int, has_proj_act=False, has_skip=True, drop_path_prob=dp_p, norm_name=norm_name, act_name=act_name, ) setattr(self, f"s{i + 1}", stage) prev_w = w if head: if head.get("width", 0) > 0 and not omit_mult: head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9) self.head = build_head(prev_w, head, norm_name, act_name) self.apply(init_weights) def forward(self, x): x = self.stem(x) for i in range(len(self.depths)): stage = getattr(self, f"s{i + 1}") x = stage(x) if getattr(self, "head", None) is not None: x = self.head(x) return x @staticmethod def get_block_func(name: Union[str, Callable]): """Retrieves the block function by name.""" if callable(name): return name if isinstance(name, str): block_funcs = { "FuseMBConv": FuseMBConv, "MBConv": MBConv, } if name in block_funcs.keys(): return block_funcs[name] raise ValueError(f"Block '{name}' not supported") def _build_effnet(**kwargs): model_args = dict( stem_w=32, block_name=MBConv, depths=[1, 2, 2, 3, 3, 4, 1], widths=[16, 24, 40, 80, 112, 192, 320], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 5, 3, 5, 5, 3], exp_rs=[1, 6, 6, 6, 6, 6, 6], se_rs=0.25, drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2), ) recursive_update(model_args, kwargs) return EffNet(**model_args) def _build_effnet_lite(**kwargs): model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6") recursive_update(model_args, kwargs) return _build_effnet(**model_args) def _build_effnetv2(**kwargs): model_args = dict( stem_w=32, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv], depths=[1, 2, 2, 3, 5, 8], widths=[16, 32, 48, 96, 112, 192], strides=[1, 2, 2, 2, 1, 2], kernels=[3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25], ) recursive_update(model_args, kwargs) return _build_effnet(**model_args) 
@registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl" ) def effnet_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl" ) def effnet_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl" ) def effnet_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl" ) def effnet_b3(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl" ) def effnet_b4(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl" ) def effnet_b5(**kwargs): model_args = dict(depth_mult=2.2, width_mult=1.6, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl" ) def effnet_b6(**kwargs): model_args = dict(depth_mult=2.6, width_mult=1.8, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl" ) def effnet_b7(**kwargs): model_args = dict(depth_mult=3.1, width_mult=2.0, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl" ) def effnet_b8(**kwargs): model_args = dict(depth_mult=3.6, width_mult=2.2, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl" ) def effnet_l2(**kwargs): model_args = dict(depth_mult=5.3, width_mult=4.3, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl" ) def effnet_b0_lite(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl" ) def effnet_b1_lite(**kwargs): model_args = 
dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl" ) def effnet_b2_lite(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl" ) def effnet_b3_lite(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl" ) def effnet_b4_lite(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl" ) def effnetv2_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b1/effnetv2_b1.pkl" ) def effnetv2_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b2/effnetv2_b2.pkl" )
megengine.hub.pretrained
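
Stage widths are scaled by width_mult and snapped to hardware-friendly values via make_divisible(w * width_mult, round_limit=0.9). basecls.layers.make_divisible is assumed below to follow the common timm/pycls definition (round to the nearest multiple of the divisor, never dropping below round_limit of the unrounded value); the re-implementation is only a sketch under that assumption:

def make_divisible(value, divisor=8, min_value=None, round_limit=0.9):
    # Assumed semantics: round to the nearest multiple of `divisor`,
    # but never shrink below `round_limit * value`.
    min_value = min_value or divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < round_limit * value:
        new_value += divisor
    return new_value

# effnet_b2 applies width_mult=1.1 to the B0 stage widths:
print([make_divisible(w * 1.1) for w in [16, 24, 40, 80, 112, 192, 320]])
# [16, 24, 48, 88, 120, 208, 352]
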
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020 <NAME> # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """EfficientNet Series EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_ References: https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py """ import math from numbers import Real from typing import Any, Callable, Mapping, Sequence, Union import megengine.hub as hub import megengine.module as M from basecls.layers import ( SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d, ) from basecls.utils import recursive_update, registers from .mbnet import MBConv from .resnet import AnyStage, SimpleStem __all__ = ["FuseMBConv", "EffNet"] class FuseMBConv(M.Module): """Fuses the expansion conv1x1 and the depthwise conv into a single conv2d. Args: w_in: input width. w_out: output width. stride: stride of conv. kernel: kernel of conv. exp_r: expansion ratio. se_r: SE ratio. has_skip: whether to apply the skip connection. drop_path_prob: drop path probability. norm_name: normalization function. act_name: activation function. """ def __init__( self, w_in: int, w_out: int, stride: int, kernel: int, exp_r: float, se_r: float, has_skip: bool, drop_path_prob: float, norm_name: str, act_name: str, **kwargs, ): super().__init__() # Expansion w_mid = w_in w_exp = int(w_in * exp_r) if exp_r != 1.0: self.exp = conv2d(w_in, w_exp, kernel, stride=stride) self.exp_bn = norm2d(norm_name, w_exp) self.exp_act = activation(act_name) w_mid = w_exp # SE if se_r > 0.0: w_se = int(w_in * se_r) self.se = SE(w_mid, w_se, act_name) # PWConv self.proj = conv2d( w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride ) self.proj_bn = norm2d(norm_name, w_out) self.has_proj_act = exp_r == 1.0 if self.has_proj_act: self.proj_act = activation(act_name) # Skip self.has_skip = has_skip and w_in == w_out and stride == 1 if self.has_skip: self.drop_path = DropPath(drop_path_prob) def forward(self, x): x_p = x if getattr(self, "exp", None) is not None: x = self.exp(x) x = self.exp_bn(x) x = self.exp_act(x) if getattr(self, "se", None) is not None: x = self.se(x) x = self.proj(x) x = self.proj_bn(x) if self.has_proj_act: x = self.proj_act(x) if self.has_skip: x = self.drop_path(x) x = x + x_p return x @registers.models.register() class EffNet(M.Module): """EfficientNet model. Args: stem_w: stem width. block_name: block name. depths: depth for each stage (number of blocks in the stage). widths: width for each stage (width of each block in the stage). strides: strides for each stage (applies to the first block of each stage). kernels: kernel sizes for each stage. exp_rs: expansion ratios for MBConv blocks in each stage. Default: ``1.0`` se_rs: Squeeze-and-Excitation (SE) ratio for each stage. Default: ``0.0`` drop_path_prob: drop path probability. Default: ``0.0`` depth_mult: depth multiplier. Default: ``1.0`` width_mult: width multiplier. Default: ``1.0`` omit_mult: whether to omit the multipliers for stem width, head width, and the first and last stage depths, as in EfficientNet-Lite. Default: ``False`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. 
Default: ``"silu"`` head: head args. Default: ``None`` """ def __init__( self, stem_w: int, block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]], depths: Sequence[int], widths: Sequence[int], strides: Sequence[int], kernels: Sequence[int], exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0, se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0, drop_path_prob: float = 0.0, depth_mult: float = 1.0, width_mult: float = 1.0, omit_mult: bool = False, norm_name: str = "BN", act_name: str = "silu", head: Mapping[str, Any] = None, ): super().__init__() depths = [ d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult) for i, d in enumerate(depths) ] self.depths = depths stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9) self.stem = SimpleStem(3, stem_w, norm_name, act_name) if isinstance(block_name, (str, Callable)): block_name = [block_name] * len(depths) block_func = [self.get_block_func(bn) for bn in block_name] widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths] if isinstance(exp_rs, Real): exp_rs = [exp_rs] * len(depths) if isinstance(se_rs, Real): se_rs = [se_rs] * len(depths) drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths))) drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths] model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs] prev_w = stem_w for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)): stage = AnyStage( prev_w, w, s, d, bf, kernel=k, exp_r=exp_r, se_r=se_r, se_from_exp=False, se_act_name=act_name, se_approx=False, se_rd_fn=int, has_proj_act=False, has_skip=True, drop_path_prob=dp_p, norm_name=norm_name, act_name=act_name, ) setattr(self, f"s{i + 1}", stage) prev_w = w if head: if head.get("width", 0) > 0 and not omit_mult: head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9) self.head = build_head(prev_w, head, norm_name, act_name) self.apply(init_weights) def forward(self, x): x = self.stem(x) for i in range(len(self.depths)): stage = getattr(self, f"s{i + 1}") x = stage(x) if getattr(self, "head", None) is not None: x = self.head(x) return x @staticmethod def get_block_func(name: Union[str, Callable]): """Retrieves the block function by name.""" if callable(name): return name if isinstance(name, str): block_funcs = { "FuseMBConv": FuseMBConv, "MBConv": MBConv, } if name in block_funcs.keys(): return block_funcs[name] raise ValueError(f"Block '{name}' not supported") def _build_effnet(**kwargs): model_args = dict( stem_w=32, block_name=MBConv, depths=[1, 2, 2, 3, 3, 4, 1], widths=[16, 24, 40, 80, 112, 192, 320], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 5, 3, 5, 5, 3], exp_rs=[1, 6, 6, 6, 6, 6, 6], se_rs=0.25, drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2), ) recursive_update(model_args, kwargs) return EffNet(**model_args) def _build_effnet_lite(**kwargs): model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6") recursive_update(model_args, kwargs) return _build_effnet(**model_args) def _build_effnetv2(**kwargs): model_args = dict( stem_w=32, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv], depths=[1, 2, 2, 3, 5, 8], widths=[16, 32, 48, 96, 112, 192], strides=[1, 2, 2, 2, 1, 2], kernels=[3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25], ) recursive_update(model_args, kwargs) return _build_effnet(**model_args) 
@registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl" ) def effnet_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl" ) def effnet_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl" ) def effnet_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl" ) def effnet_b3(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl" ) def effnet_b4(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl" ) def effnet_b5(**kwargs): model_args = dict(depth_mult=2.2, width_mult=1.6, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl" ) def effnet_b6(**kwargs): model_args = dict(depth_mult=2.6, width_mult=1.8, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl" ) def effnet_b7(**kwargs): model_args = dict(depth_mult=3.1, width_mult=2.0, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl" ) def effnet_b8(**kwargs): model_args = dict(depth_mult=3.6, width_mult=2.2, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl" ) def effnet_l2(**kwargs): model_args = dict(depth_mult=5.3, width_mult=4.3, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl" ) def effnet_b0_lite(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl" ) def effnet_b1_lite(**kwargs): model_args = 
dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl" ) def effnet_b2_lite(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl" ) def effnet_b3_lite(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl" ) def effnet_b4_lite(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl" ) def effnetv2_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b1/effnetv2_b1.pkl" ) def effnetv2_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b2/effnetv2_b2.pkl" ) def effnetv2_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b3/effnetv2_b3.pkl" )
megengine.hub.pretrained
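
All builders compose their arguments the same way: start from the family defaults in _build_effnet or _build_effnetv2, then fold caller overrides in with recursive_update. The recursive merge is what lets a variant pass head=dict(dropout_prob=0.3) without clobbering the head's name and width. The sketch below shows the assumed behavior of basecls.utils.recursive_update; the real helper may differ in detail:

def recursive_update(dst, src):
    # Assumed behavior: merge src into dst in place, recursing into
    # nested dicts instead of replacing them wholesale.
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(dst.get(key), dict):
            recursive_update(dst[key], value)
        else:
            dst[key] = value
    return dst

base = dict(drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2))
recursive_update(base, dict(head=dict(dropout_prob=0.3)))
print(base["head"])  # {'name': 'ClsHead', 'width': 1280, 'dropout_prob': 0.3}
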
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020 <NAME> # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """EfficientNet Series EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_ References: https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py """ import math from numbers import Real from typing import Any, Callable, Mapping, Sequence, Union import megengine.hub as hub import megengine.module as M from basecls.layers import ( SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d, ) from basecls.utils import recursive_update, registers from .mbnet import MBConv from .resnet import AnyStage, SimpleStem __all__ = ["FuseMBConv", "EffNet"] class FuseMBConv(M.Module): """Fuses the expansion conv1x1 and the depthwise conv into a single conv2d. Args: w_in: input width. w_out: output width. stride: stride of conv. kernel: kernel of conv. exp_r: expansion ratio. se_r: SE ratio. has_skip: whether to apply the skip connection. drop_path_prob: drop path probability. norm_name: normalization function. act_name: activation function. """ def __init__( self, w_in: int, w_out: int, stride: int, kernel: int, exp_r: float, se_r: float, has_skip: bool, drop_path_prob: float, norm_name: str, act_name: str, **kwargs, ): super().__init__() # Expansion w_mid = w_in w_exp = int(w_in * exp_r) if exp_r != 1.0: self.exp = conv2d(w_in, w_exp, kernel, stride=stride) self.exp_bn = norm2d(norm_name, w_exp) self.exp_act = activation(act_name) w_mid = w_exp # SE if se_r > 0.0: w_se = int(w_in * se_r) self.se = SE(w_mid, w_se, act_name) # PWConv self.proj = conv2d( w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride ) self.proj_bn = norm2d(norm_name, w_out) self.has_proj_act = exp_r == 1.0 if self.has_proj_act: self.proj_act = activation(act_name) # Skip self.has_skip = has_skip and w_in == w_out and stride == 1 if self.has_skip: self.drop_path = DropPath(drop_path_prob) def forward(self, x): x_p = x if getattr(self, "exp", None) is not None: x = self.exp(x) x = self.exp_bn(x) x = self.exp_act(x) if getattr(self, "se", None) is not None: x = self.se(x) x = self.proj(x) x = self.proj_bn(x) if self.has_proj_act: x = self.proj_act(x) if self.has_skip: x = self.drop_path(x) x = x + x_p return x @registers.models.register() class EffNet(M.Module): """EfficientNet model. Args: stem_w: stem width. block_name: block name. depths: depth for each stage (number of blocks in the stage). widths: width for each stage (width of each block in the stage). strides: strides for each stage (applies to the first block of each stage). kernels: kernel sizes for each stage. exp_rs: expansion ratios for MBConv blocks in each stage. Default: ``1.0`` se_rs: Squeeze-and-Excitation (SE) ratio for each stage. Default: ``0.0`` drop_path_prob: drop path probability. Default: ``0.0`` depth_mult: depth multiplier. Default: ``1.0`` width_mult: width multiplier. Default: ``1.0`` omit_mult: whether to omit the multipliers for stem width, head width, and the first and last stage depths, as in EfficientNet-Lite. Default: ``False`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. 
Default: ``"silu"`` head: head args. Default: ``None`` """ def __init__( self, stem_w: int, block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]], depths: Sequence[int], widths: Sequence[int], strides: Sequence[int], kernels: Sequence[int], exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0, se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0, drop_path_prob: float = 0.0, depth_mult: float = 1.0, width_mult: float = 1.0, omit_mult: bool = False, norm_name: str = "BN", act_name: str = "silu", head: Mapping[str, Any] = None, ): super().__init__() depths = [ d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult) for i, d in enumerate(depths) ] self.depths = depths stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9) self.stem = SimpleStem(3, stem_w, norm_name, act_name) if isinstance(block_name, (str, Callable)): block_name = [block_name] * len(depths) block_func = [self.get_block_func(bn) for bn in block_name] widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths] if isinstance(exp_rs, Real): exp_rs = [exp_rs] * len(depths) if isinstance(se_rs, Real): se_rs = [se_rs] * len(depths) drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths))) drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths] model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs] prev_w = stem_w for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)): stage = AnyStage( prev_w, w, s, d, bf, kernel=k, exp_r=exp_r, se_r=se_r, se_from_exp=False, se_act_name=act_name, se_approx=False, se_rd_fn=int, has_proj_act=False, has_skip=True, drop_path_prob=dp_p, norm_name=norm_name, act_name=act_name, ) setattr(self, f"s{i + 1}", stage) prev_w = w if head: if head.get("width", 0) > 0 and not omit_mult: head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9) self.head = build_head(prev_w, head, norm_name, act_name) self.apply(init_weights) def forward(self, x): x = self.stem(x) for i in range(len(self.depths)): stage = getattr(self, f"s{i + 1}") x = stage(x) if getattr(self, "head", None) is not None: x = self.head(x) return x @staticmethod def get_block_func(name: Union[str, Callable]): """Retrieves the block function by name.""" if callable(name): return name if isinstance(name, str): block_funcs = { "FuseMBConv": FuseMBConv, "MBConv": MBConv, } if name in block_funcs.keys(): return block_funcs[name] raise ValueError(f"Block '{name}' not supported") def _build_effnet(**kwargs): model_args = dict( stem_w=32, block_name=MBConv, depths=[1, 2, 2, 3, 3, 4, 1], widths=[16, 24, 40, 80, 112, 192, 320], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 5, 3, 5, 5, 3], exp_rs=[1, 6, 6, 6, 6, 6, 6], se_rs=0.25, drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2), ) recursive_update(model_args, kwargs) return EffNet(**model_args) def _build_effnet_lite(**kwargs): model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6") recursive_update(model_args, kwargs) return _build_effnet(**model_args) def _build_effnetv2(**kwargs): model_args = dict( stem_w=32, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv], depths=[1, 2, 2, 3, 5, 8], widths=[16, 32, 48, 96, 112, 192], strides=[1, 2, 2, 2, 1, 2], kernels=[3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25], ) recursive_update(model_args, kwargs) return _build_effnet(**model_args) 
@registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl" ) def effnet_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl" ) def effnet_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl" ) def effnet_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl" ) def effnet_b3(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl" ) def effnet_b4(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl" ) def effnet_b5(**kwargs): model_args = dict(depth_mult=2.2, width_mult=1.6, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl" ) def effnet_b6(**kwargs): model_args = dict(depth_mult=2.6, width_mult=1.8, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl" ) def effnet_b7(**kwargs): model_args = dict(depth_mult=3.1, width_mult=2.0, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl" ) def effnet_b8(**kwargs): model_args = dict(depth_mult=3.6, width_mult=2.2, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl" ) def effnet_l2(**kwargs): model_args = dict(depth_mult=5.3, width_mult=4.3, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl" ) def effnet_b0_lite(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl" ) def effnet_b1_lite(**kwargs): model_args = 
dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl" ) def effnet_b2_lite(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl" ) def effnet_b3_lite(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl" ) def effnet_b4_lite(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl" ) def effnetv2_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b1/effnetv2_b1.pkl" ) def effnetv2_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b2/effnetv2_b2.pkl" ) def effnetv2_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b3/effnetv2_b3.pkl" ) def effnetv2_b3(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_s/effnetv2_s.pkl" )
megengine.hub.pretrained
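
Each registered builder is wrapped in megengine.hub.pretrained with its checkpoint URL, which is exactly what the api column records. Under MegEngine's hub convention the decorated builder gains a pretrained flag that downloads and loads the .pkl weights; the import path below is an assumption about how basecls exposes the builders:

from basecls.models import effnetv2_b0  # assumed import path

# pretrained=True is expected to fetch the checkpoint from the URL in the
# decorator and load it into the freshly built network; the default
# (pretrained=False) returns randomly initialized weights.
model = effnetv2_b0(pretrained=True)
model.eval()  # switch BN and dropout to inference behavior
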
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020 <NAME> # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """EfficientNet Series EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_ References: https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py """ import math from numbers import Real from typing import Any, Callable, Mapping, Sequence, Union import megengine.hub as hub import megengine.module as M from basecls.layers import ( SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d, ) from basecls.utils import recursive_update, registers from .mbnet import MBConv from .resnet import AnyStage, SimpleStem __all__ = ["FuseMBConv", "EffNet"] class FuseMBConv(M.Module): """Fuses the expansion conv1x1 and the depthwise conv into a single conv2d. Args: w_in: input width. w_out: output width. stride: stride of conv. kernel: kernel of conv. exp_r: expansion ratio. se_r: SE ratio. has_skip: whether to apply the skip connection. drop_path_prob: drop path probability. norm_name: normalization function. act_name: activation function. """ def __init__( self, w_in: int, w_out: int, stride: int, kernel: int, exp_r: float, se_r: float, has_skip: bool, drop_path_prob: float, norm_name: str, act_name: str, **kwargs, ): super().__init__() # Expansion w_mid = w_in w_exp = int(w_in * exp_r) if exp_r != 1.0: self.exp = conv2d(w_in, w_exp, kernel, stride=stride) self.exp_bn = norm2d(norm_name, w_exp) self.exp_act = activation(act_name) w_mid = w_exp # SE if se_r > 0.0: w_se = int(w_in * se_r) self.se = SE(w_mid, w_se, act_name) # PWConv self.proj = conv2d( w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride ) self.proj_bn = norm2d(norm_name, w_out) self.has_proj_act = exp_r == 1.0 if self.has_proj_act: self.proj_act = activation(act_name) # Skip self.has_skip = has_skip and w_in == w_out and stride == 1 if self.has_skip: self.drop_path = DropPath(drop_path_prob) def forward(self, x): x_p = x if getattr(self, "exp", None) is not None: x = self.exp(x) x = self.exp_bn(x) x = self.exp_act(x) if getattr(self, "se", None) is not None: x = self.se(x) x = self.proj(x) x = self.proj_bn(x) if self.has_proj_act: x = self.proj_act(x) if self.has_skip: x = self.drop_path(x) x = x + x_p return x @registers.models.register() class EffNet(M.Module): """EfficientNet model. Args: stem_w: stem width. block_name: block name. depths: depth for each stage (number of blocks in the stage). widths: width for each stage (width of each block in the stage). strides: strides for each stage (applies to the first block of each stage). kernels: kernel sizes for each stage. exp_rs: expansion ratios for MBConv blocks in each stage. Default: ``1.0`` se_rs: Squeeze-and-Excitation (SE) ratio for each stage. Default: ``0.0`` drop_path_prob: drop path probability. Default: ``0.0`` depth_mult: depth multiplier. Default: ``1.0`` width_mult: width multiplier. Default: ``1.0`` omit_mult: whether to omit the multipliers for stem width, head width, and the first and last stage depths, as in EfficientNet-Lite. Default: ``False`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. 
Default: ``"silu"`` head: head args. Default: ``None`` """ def __init__( self, stem_w: int, block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]], depths: Sequence[int], widths: Sequence[int], strides: Sequence[int], kernels: Sequence[int], exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0, se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0, drop_path_prob: float = 0.0, depth_mult: float = 1.0, width_mult: float = 1.0, omit_mult: bool = False, norm_name: str = "BN", act_name: str = "silu", head: Mapping[str, Any] = None, ): super().__init__() depths = [ d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult) for i, d in enumerate(depths) ] self.depths = depths stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9) self.stem = SimpleStem(3, stem_w, norm_name, act_name) if isinstance(block_name, (str, Callable)): block_name = [block_name] * len(depths) block_func = [self.get_block_func(bn) for bn in block_name] widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths] if isinstance(exp_rs, Real): exp_rs = [exp_rs] * len(depths) if isinstance(se_rs, Real): se_rs = [se_rs] * len(depths) drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths))) drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths] model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs] prev_w = stem_w for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)): stage = AnyStage( prev_w, w, s, d, bf, kernel=k, exp_r=exp_r, se_r=se_r, se_from_exp=False, se_act_name=act_name, se_approx=False, se_rd_fn=int, has_proj_act=False, has_skip=True, drop_path_prob=dp_p, norm_name=norm_name, act_name=act_name, ) setattr(self, f"s{i + 1}", stage) prev_w = w if head: if head.get("width", 0) > 0 and not omit_mult: head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9) self.head = build_head(prev_w, head, norm_name, act_name) self.apply(init_weights) def forward(self, x): x = self.stem(x) for i in range(len(self.depths)): stage = getattr(self, f"s{i + 1}") x = stage(x) if getattr(self, "head", None) is not None: x = self.head(x) return x @staticmethod def get_block_func(name: Union[str, Callable]): """Retrieves the block function by name.""" if callable(name): return name if isinstance(name, str): block_funcs = { "FuseMBConv": FuseMBConv, "MBConv": MBConv, } if name in block_funcs.keys(): return block_funcs[name] raise ValueError(f"Block '{name}' not supported") def _build_effnet(**kwargs): model_args = dict( stem_w=32, block_name=MBConv, depths=[1, 2, 2, 3, 3, 4, 1], widths=[16, 24, 40, 80, 112, 192, 320], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 5, 3, 5, 5, 3], exp_rs=[1, 6, 6, 6, 6, 6, 6], se_rs=0.25, drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2), ) recursive_update(model_args, kwargs) return EffNet(**model_args) def _build_effnet_lite(**kwargs): model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6") recursive_update(model_args, kwargs) return _build_effnet(**model_args) def _build_effnetv2(**kwargs): model_args = dict( stem_w=32, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv], depths=[1, 2, 2, 3, 5, 8], widths=[16, 32, 48, 96, 112, 192], strides=[1, 2, 2, 2, 1, 2], kernels=[3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25], ) recursive_update(model_args, kwargs) return _build_effnet(**model_args) 
@registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl" ) def effnet_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl" ) def effnet_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl" ) def effnet_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl" ) def effnet_b3(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl" ) def effnet_b4(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl" ) def effnet_b5(**kwargs): model_args = dict(depth_mult=2.2, width_mult=1.6, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl" ) def effnet_b6(**kwargs): model_args = dict(depth_mult=2.6, width_mult=1.8, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl" ) def effnet_b7(**kwargs): model_args = dict(depth_mult=3.1, width_mult=2.0, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl" ) def effnet_b8(**kwargs): model_args = dict(depth_mult=3.6, width_mult=2.2, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl" ) def effnet_l2(**kwargs): model_args = dict(depth_mult=5.3, width_mult=4.3, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl" ) def effnet_b0_lite(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl" ) def effnet_b1_lite(**kwargs): model_args = 
dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl" ) def effnet_b2_lite(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl" ) def effnet_b3_lite(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl" ) def effnet_b4_lite(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl" ) def effnetv2_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b1/effnetv2_b1.pkl" ) def effnetv2_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b2/effnetv2_b2.pkl" ) def effnetv2_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b3/effnetv2_b3.pkl" ) def effnetv2_b3(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_s/effnetv2_s.pkl" ) def effnetv2_s(**kwargs): model_args = dict(stem_w=24, depths=[2, 4, 4, 6, 9, 15], widths=[24, 48, 64, 128, 160, 256]) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_m/effnetv2_m.pkl" )
megengine.hub.pretrained
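The rows above all complete a `@hub.pretrained(...)` decorator call on a model-builder function. As a quick orientation, here is a minimal sketch of how such decorated builders are used; the URL and model are hypothetical, and the `pretrained=True` flag is assumed to behave as in MegEngine's hub (fetch the weights from the URL and load them into the returned module):

import megengine.hub as hub
import megengine.module as M

@hub.pretrained("https://example.com/tiny_model.pkl")  # hypothetical URL
def tiny_model(**kwargs):
    # the builder just returns a module; the decorator only attaches the weights URL
    return M.Sequential(M.Conv2d(3, 8, 3), M.ReLU())

net = tiny_model()                   # random init, nothing downloaded
# net = tiny_model(pretrained=True)  # assumed flag: fetch the URL and load weights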
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020 <NAME> # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """EfficientNet Series EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_ References: https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py """ import math from numbers import Real from typing import Any, Callable, Mapping, Sequence, Union import megengine.hub as hub import megengine.module as M from basecls.layers import ( SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d, ) from basecls.utils import recursive_update, registers from .mbnet import MBConv from .resnet import AnyStage, SimpleStem __all__ = ["FuseMBConv", "EffNet"] class FuseMBConv(M.Module): """Fusing the proj conv1x1 and depthwise conv into a conv2d. Args: w_in: input width. w_out: output width. stride: stride of conv. kernel: kernel of conv. exp_r: expansion ratio. se_r: SE ratio. has_skip: whether to apply skip connection. drop_path_prob: drop path probability. norm_name: normalization function. act_name: activation function. """ def __init__( self, w_in: int, w_out: int, stride: int, kernel: int, exp_r: float, se_r: float, has_skip: bool, drop_path_prob: float, norm_name: str, act_name: str, **kwargs, ): super().__init__() # Expansion w_mid = w_in w_exp = int(w_in * exp_r) if exp_r != 1.0: self.exp = conv2d(w_in, w_exp, kernel, stride=stride) self.exp_bn = norm2d(norm_name, w_exp) self.exp_act = activation(act_name) w_mid = w_exp # SE if se_r > 0.0: w_se = int(w_in * se_r) self.se = SE(w_mid, w_se, act_name) # PWConv self.proj = conv2d( w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride ) self.proj_bn = norm2d(norm_name, w_out) self.has_proj_act = exp_r == 1.0 if self.has_proj_act: self.proj_act = activation(act_name) # Skip self.has_skip = has_skip and w_in == w_out and stride == 1 if self.has_skip: self.drop_path = DropPath(drop_path_prob) def forward(self, x): x_p = x if getattr(self, "exp", None) is not None: x = self.exp(x) x = self.exp_bn(x) x = self.exp_act(x) if getattr(self, "se", None) is not None: x = self.se(x) x = self.proj(x) x = self.proj_bn(x) if self.has_proj_act: x = self.proj_act(x) if self.has_skip: x = self.drop_path(x) x = x + x_p return x @registers.models.register() class EffNet(M.Module): """EfficientNet model. Args: stem_w: stem width. block_name: block name. depths: depth for each stage (number of blocks in the stage). widths: width for each stage (width of each block in the stage). strides: strides for each stage (applies to the first block of each stage). kernels: kernel sizes for each stage. exp_rs: expansion ratios for MBConv blocks in each stage. Default: ``1.0`` se_rs: Squeeze-and-Excitation (SE) ratios for each stage. Default: ``0.0`` drop_path_prob: drop path probability. Default: ``0.0`` depth_mult: depth multiplier. Default: ``1.0`` width_mult: width multiplier. Default: ``1.0`` omit_mult: omit multiplier for stem width, head width, the first stage depth and the last stage depth, enabled in EfficientNet-Lite. Default: ``False`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. 
Default: ``"silu"`` head: head args. Default: ``None`` """ def __init__( self, stem_w: int, block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]], depths: Sequence[int], widths: Sequence[int], strides: Sequence[int], kernels: Sequence[int], exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0, se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0, drop_path_prob: float = 0.0, depth_mult: float = 1.0, width_mult: float = 1.0, omit_mult: bool = False, norm_name: str = "BN", act_name: str = "silu", head: Mapping[str, Any] = None, ): super().__init__() depths = [ d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult) for i, d in enumerate(depths) ] self.depths = depths stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9) self.stem = SimpleStem(3, stem_w, norm_name, act_name) if isinstance(block_name, (str, Callable)): block_name = [block_name] * len(depths) block_func = [self.get_block_func(bn) for bn in block_name] widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths] if isinstance(exp_rs, Real): exp_rs = [exp_rs] * len(depths) if isinstance(se_rs, Real): se_rs = [se_rs] * len(depths) drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths))) drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths] model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs] prev_w = stem_w for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)): stage = AnyStage( prev_w, w, s, d, bf, kernel=k, exp_r=exp_r, se_r=se_r, se_from_exp=False, se_act_name=act_name, se_approx=False, se_rd_fn=int, has_proj_act=False, has_skip=True, drop_path_prob=dp_p, norm_name=norm_name, act_name=act_name, ) setattr(self, f"s{i + 1}", stage) prev_w = w if head: if head.get("width", 0) > 0 and not omit_mult: head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9) self.head = build_head(prev_w, head, norm_name, act_name) self.apply(init_weights) def forward(self, x): x = self.stem(x) for i in range(len(self.depths)): stage = getattr(self, f"s{i + 1}") x = stage(x) if getattr(self, "head", None) is not None: x = self.head(x) return x @staticmethod def get_block_func(name: Union[str, Callable]): """Retrieves the block function by name.""" if callable(name): return name if isinstance(name, str): block_funcs = { "FuseMBConv": FuseMBConv, "MBConv": MBConv, } if name in block_funcs.keys(): return block_funcs[name] raise ValueError(f"Block '{name}' not supported") def _build_effnet(**kwargs): model_args = dict( stem_w=32, block_name=MBConv, depths=[1, 2, 2, 3, 3, 4, 1], widths=[16, 24, 40, 80, 112, 192, 320], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 5, 3, 5, 5, 3], exp_rs=[1, 6, 6, 6, 6, 6, 6], se_rs=0.25, drop_path_prob=0.2, head=dict(name="ClsHead", width=1280, dropout_prob=0.2), ) recursive_update(model_args, kwargs) return EffNet(**model_args) def _build_effnet_lite(**kwargs): model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6") recursive_update(model_args, kwargs) return _build_effnet(**model_args) def _build_effnetv2(**kwargs): model_args = dict( stem_w=32, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv], depths=[1, 2, 2, 3, 5, 8], widths=[16, 32, 48, 96, 112, 192], strides=[1, 2, 2, 2, 1, 2], kernels=[3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25], ) recursive_update(model_args, kwargs) return _build_effnet(**model_args) 
@registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl" ) def effnet_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl" ) def effnet_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl" ) def effnet_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl" ) def effnet_b3(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl" ) def effnet_b4(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl" ) def effnet_b5(**kwargs): model_args = dict(depth_mult=2.2, width_mult=1.6, head=dict(dropout_prob=0.4)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl" ) def effnet_b6(**kwargs): model_args = dict(depth_mult=2.6, width_mult=1.8, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl" ) def effnet_b7(**kwargs): model_args = dict(depth_mult=3.1, width_mult=2.0, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl" ) def effnet_b8(**kwargs): model_args = dict(depth_mult=3.6, width_mult=2.2, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl" ) def effnet_l2(**kwargs): model_args = dict(depth_mult=5.3, width_mult=4.3, head=dict(dropout_prob=0.5)) recursive_update(model_args, kwargs) return _build_effnet(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl" ) def effnet_b0_lite(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl" ) def effnet_b1_lite(**kwargs): model_args = 
dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl" ) def effnet_b2_lite(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl" ) def effnet_b3_lite(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl" ) def effnet_b4_lite(**kwargs): model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnet_lite(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl" ) def effnetv2_b0(**kwargs): model_args = dict(depth_mult=1.0, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b1/effnetv2_b1.pkl" ) def effnetv2_b1(**kwargs): model_args = dict(depth_mult=1.1, width_mult=1.0) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b2/effnetv2_b2.pkl" ) def effnetv2_b2(**kwargs): model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b3/effnetv2_b3.pkl" ) def effnetv2_b3(**kwargs): model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3)) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_s/effnetv2_s.pkl" ) def effnetv2_s(**kwargs): model_args = dict(stem_w=24, depths=[2, 4, 4, 6, 9, 15], widths=[24, 48, 64, 128, 160, 256]) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_m/effnetv2_m.pkl" ) def effnetv2_m(**kwargs): model_args = dict( stem_w=24, block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv, MBConv], depths=[3, 5, 5, 7, 14, 18, 5], widths=[24, 48, 80, 160, 176, 304, 512], strides=[1, 2, 2, 2, 1, 2, 1], kernels=[3, 3, 3, 3, 3, 3, 3], exp_rs=[1, 4, 4, 4, 6, 6, 6], se_rs=[0, 0, 0, 0.25, 0.25, 0.25, 0.25], head=dict(dropout_prob=0.3), ) recursive_update(model_args, kwargs) return _build_effnetv2(**model_args) @registers.models.register() @
hub.pretrained( "https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_l/effnetv2_l.pkl" )
megengine.hub.pretrained
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32)
fill_(x, 5.0)
megengine.module.init.fill_
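The completion above calls `megengine.module.init.fill_`, which writes a constant into an existing tensor in place. A condensed standalone version of what the surrounding test checks:

import numpy as np
from megengine import tensor
from megengine.module.init import fill_

x = tensor(np.zeros((2, 3, 4)), dtype=np.float32)
fill_(x, 5.0)                    # in-place: every element of x becomes 5.0
assert (x.numpy() == 5.0).all()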
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l =
Linear(in_features=3, out_features=8)
megengine.module.Linear
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout =
calculate_fan_in_and_fan_out(l.weight)
megengine.module.init.calculate_fan_in_and_fan_out
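For a `Linear` layer, `calculate_fan_in_and_fan_out` simply returns `(in_features, out_features)`, as the assertions later in this prompt confirm. A condensed sketch:

from megengine.module import Linear
from megengine.module.init import calculate_fan_in_and_fan_out

l = Linear(in_features=3, out_features=8)
fan_in, fan_out = calculate_fan_in_and_fan_out(l.weight)
assert (fan_in, fan_out) == (3, 8)   # (in_features, out_features)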
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 3 assert fanout == 8 with pytest.raises(ValueError): calculate_fan_in_and_fan_out(l.bias) l =
Conv1d(in_channels=2, out_channels=3, kernel_size=5)
megengine.module.Conv1d
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 3 assert fanout == 8 with pytest.raises(ValueError): calculate_fan_in_and_fan_out(l.bias) l = Conv1d(in_channels=2, out_channels=3, kernel_size=5) fanin, fanout =
calculate_fan_in_and_fan_out(l.weight)
megengine.module.init.calculate_fan_in_and_fan_out
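For convolution weights, the assertions in this test encode the rule `fan = channels // groups * prod(kernel_size)` for both fan-in and fan-out (the commented-out `FIXME` blocks in the prompt mark the group-Conv1d and plain-Conv3d cases that were known to be wrong at the time). A condensed sketch of the grouped Conv2d case, taken from the asserts:

from megengine.module import Conv2d
from megengine.module.init import calculate_fan_in_and_fan_out

# fan_in  = (in_channels  // groups) * kh * kw
# fan_out = (out_channels // groups) * kh * kw
l = Conv2d(in_channels=2, out_channels=4, kernel_size=(5, 7), groups=2)
fan_in, fan_out = calculate_fan_in_and_fan_out(l.weight)
assert (fan_in, fan_out) == (2 // 2 * 5 * 7, 4 // 2 * 5 * 7)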
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 3 assert fanout == 8 with pytest.raises(ValueError): calculate_fan_in_and_fan_out(l.bias) l = Conv1d(in_channels=2, out_channels=3, kernel_size=5) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 assert fanout == 3 * 5 # FIXME: will be wrong for group conv1d # l = Conv1d(in_channels=2, out_channels=4, kernel_size=5, groups=2) # fanin, fanout = calculate_fan_in_and_fan_out(l.weight) # assert fanin == 2 // 2 * 5 # assert fanout == 4 // 2 * 5 l =
Conv2d(in_channels=2, out_channels=3, kernel_size=(5, 7))
megengine.module.Conv2d
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 3 assert fanout == 8 with pytest.raises(ValueError): calculate_fan_in_and_fan_out(l.bias) l = Conv1d(in_channels=2, out_channels=3, kernel_size=5) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 assert fanout == 3 * 5 # FIXME: will be wrong for group conv1d # l = Conv1d(in_channels=2, out_channels=4, kernel_size=5, groups=2) # fanin, fanout = calculate_fan_in_and_fan_out(l.weight) # assert fanin == 2 // 2 * 5 # assert fanout == 4 // 2 * 5 l = Conv2d(in_channels=2, out_channels=3, kernel_size=(5, 7)) fanin, fanout =
calculate_fan_in_and_fan_out(l.weight)
megengine.module.init.calculate_fan_in_and_fan_out
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 3 assert fanout == 8 with pytest.raises(ValueError): calculate_fan_in_and_fan_out(l.bias) l = Conv1d(in_channels=2, out_channels=3, kernel_size=5) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 assert fanout == 3 * 5 # FIXME: will be wrong for group conv1d # l = Conv1d(in_channels=2, out_channels=4, kernel_size=5, groups=2) # fanin, fanout = calculate_fan_in_and_fan_out(l.weight) # assert fanin == 2 // 2 * 5 # assert fanout == 4 // 2 * 5 l = Conv2d(in_channels=2, out_channels=3, kernel_size=(5, 7)) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 * 7 assert fanout == 3 * 5 * 7 l =
Conv2d(in_channels=2, out_channels=4, kernel_size=(5, 7), groups=2)
megengine.module.Conv2d
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 3 assert fanout == 8 with pytest.raises(ValueError): calculate_fan_in_and_fan_out(l.bias) l = Conv1d(in_channels=2, out_channels=3, kernel_size=5) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 assert fanout == 3 * 5 # FIXME: will be wrong for group conv1d # l = Conv1d(in_channels=2, out_channels=4, kernel_size=5, groups=2) # fanin, fanout = calculate_fan_in_and_fan_out(l.weight) # assert fanin == 2 // 2 * 5 # assert fanout == 4 // 2 * 5 l = Conv2d(in_channels=2, out_channels=3, kernel_size=(5, 7)) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 * 7 assert fanout == 3 * 5 * 7 l = Conv2d(in_channels=2, out_channels=4, kernel_size=(5, 7), groups=2) fanin, fanout =
calculate_fan_in_and_fan_out(l.weight)
megengine.module.init.calculate_fan_in_and_fan_out
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 3 assert fanout == 8 with pytest.raises(ValueError): calculate_fan_in_and_fan_out(l.bias) l = Conv1d(in_channels=2, out_channels=3, kernel_size=5) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 assert fanout == 3 * 5 # FIXME: will be wrong for group conv1d # l = Conv1d(in_channels=2, out_channels=4, kernel_size=5, groups=2) # fanin, fanout = calculate_fan_in_and_fan_out(l.weight) # assert fanin == 2 // 2 * 5 # assert fanout == 4 // 2 * 5 l = Conv2d(in_channels=2, out_channels=3, kernel_size=(5, 7)) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 * 7 assert fanout == 3 * 5 * 7 l = Conv2d(in_channels=2, out_channels=4, kernel_size=(5, 7), groups=2) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 // 2 * 5 * 7 assert fanout == 4 // 2 * 5 * 7 # FIXME: will be wrong for conv3d # l = Conv3d(in_channels=2, out_channels=3, kernel_size=(5, 7, 9)) # fanin, fanout = calculate_fan_in_and_fan_out(l.weight) # assert fanin == 2 * 5 * 7 * 9 # assert fanout == 3 * 5 * 7 * 9 l =
Conv3d(in_channels=2, out_channels=4, kernel_size=(5, 7, 9), groups=2)
megengine.module.Conv3d
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 3 assert fanout == 8 with pytest.raises(ValueError): calculate_fan_in_and_fan_out(l.bias) l = Conv1d(in_channels=2, out_channels=3, kernel_size=5) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 assert fanout == 3 * 5 # FIXME: will be wrong for group conv1d # l = Conv1d(in_channels=2, out_channels=4, kernel_size=5, groups=2) # fanin, fanout = calculate_fan_in_and_fan_out(l.weight) # assert fanin == 2 // 2 * 5 # assert fanout == 4 // 2 * 5 l = Conv2d(in_channels=2, out_channels=3, kernel_size=(5, 7)) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 * 5 * 7 assert fanout == 3 * 5 * 7 l = Conv2d(in_channels=2, out_channels=4, kernel_size=(5, 7), groups=2) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 2 // 2 * 5 * 7 assert fanout == 4 // 2 * 5 * 7 # FIXME: will be wrong for conv3d # l = Conv3d(in_channels=2, out_channels=3, kernel_size=(5, 7, 9)) # fanin, fanout = calculate_fan_in_and_fan_out(l.weight) # assert fanin == 2 * 5 * 7 * 9 # assert fanout == 3 * 5 * 7 * 9 l = Conv3d(in_channels=2, out_channels=4, kernel_size=(5, 7, 9), groups=2) fanin, fanout =
calculate_fan_in_and_fan_out(l.weight)
megengine.module.init.calculate_fan_in_and_fan_out
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import numpy as np import pytest from megengine import tensor from megengine.module import Conv1d, Conv2d, Conv3d, Linear from megengine.module.init import calculate_fan_in_and_fan_out, fill_ def test_fill_(): x = tensor(np.zeros((2, 3, 4)), dtype=np.float32) fill_(x, 5.0) np.testing.assert_array_equal( x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32) ) def test_calculate_fan_in_and_fan_out(): l = Linear(in_features=3, out_features=8) fanin, fanout = calculate_fan_in_and_fan_out(l.weight) assert fanin == 3 assert fanout == 8 with pytest.raises(ValueError):
calculate_fan_in_and_fan_out(l.bias)
megengine.module.init.calculate_fan_in_and_fan_out
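This row targets the error path: the bias of a `Linear` layer is one-dimensional, and fan values are undefined for parameters with fewer than two dimensions, so the call raises `ValueError`, which is exactly what the `pytest.raises` block in the prompt checks:

import pytest
from megengine.module import Linear
from megengine.module.init import calculate_fan_in_and_fan_out

l = Linear(in_features=3, out_features=8)
with pytest.raises(ValueError):
    calculate_fan_in_and_fan_out(l.bias)   # 1-D parameter has no fan-in/fan-out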
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x =
Tensor(0)
megengine.tensor.Tensor
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @
trace(symbolic=True, capture_as_const=True)
megengine.jit.tracing.trace
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph =
Net.load(orig_model)
megengine.utils.network.Network.load
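The rows in this block walk through a dump-and-reload round trip: trace a function with `capture_as_const=True`, `dump` it into a `BytesIO` with `user_info`, then recover it with `Network.load` and inspect `graph.metadata`. Unrolled from the flattened prompt into runnable form:

import io
from megengine.jit.tracing import trace
from megengine.tensor import Tensor
from megengine.utils.network import Network as Net

@trace(symbolic=True, capture_as_const=True)
def fwd(x):
    return x * 2

fwd(Tensor(0))  # run once so the function is traced
buf = io.BytesIO()
fwd.dump(buf, user_info="test", optimize_for_inference=False)
buf.seek(0)

graph = Net.load(buf)
assert graph.metadata["user_info"] == "test"
assert graph.metadata["graph_modified"] is False  # dumped by tracing, not Network.dump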
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph =
Net.load(orig_model)
megengine.utils.network.Network.load
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph =
Net.load(orig_model)
megengine.utils.network.Network.load
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a =
Tensor([1, 2])
megengine.tensor.Tensor
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b =
Tensor([3, 4])
megengine.tensor.Tensor
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @
trace(symbolic=True, capture_as_const=True)
megengine.jit.tracing.trace
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph =
Net.load(orig_model)
megengine.utils.network.Network.load
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out =
F.mul(vara, varb)
megengine.functional.mul
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out =
F.relu(out)
megengine.functional.relu
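The `F.mul`/`F.relu` completions above operate directly on `VarNode`s pulled out of a loaded `Network`, which is how `replace_vars` grafts a new subgraph over an old one. The full pattern from this prompt, reformatted for readability:

import io
import megengine.functional as F
from megengine.jit.tracing import trace
from megengine.tensor import Tensor
from megengine.utils.network import Network as Net

@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
    return (a + b) * 2

fwd(Tensor([1, 2]), Tensor([3, 4]))
buf = io.BytesIO()
fwd.dump(buf, arg_names=["a", "b"], output_names="o", optimize_for_inference=False)
buf.seek(0)

graph = Net.load(buf)
vara = graph.var_filter.name("a").as_unique()
varb = graph.var_filter.name("b").as_unique()
new_out = F.relu(F.mul(vara, varb))                # functional ops accept VarNodes
add_opr = list(graph.opr_filter.has_input(vara))[0]
graph.replace_vars({add_opr.outputs[0]: new_out})  # reroute (a + b) -> relu(a * b)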
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph =
GraphInference(modified_model)
megengine.utils.comp_graph_tools.GraphInference
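GraphInference is the loader these rows use to execute a dumped graph. A condensed, self-contained sketch of the round trip (trace, dump, load, run), using only calls that appear in the prompts; the expected output [8, 12] follows from (a + b) * 2:

import io
import numpy as np
from megengine.jit.tracing import trace
from megengine.tensor import Tensor
from megengine.utils.comp_graph_tools import GraphInference

@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
    return (a + b) * 2

a, b = Tensor([1, 2]), Tensor([3, 4])
fwd(a, b)  # run once so the trace is recorded

buf = io.BytesIO()
fwd.dump(buf, arg_names=["a", "b"], output_names="o", optimize_for_inference=False)
buf.seek(0)

infer = GraphInference(buf)   # wraps the serialized graph for inference
out = infer.run(a, b)         # returns a dict keyed by output name
np.testing.assert_equal(out["o"], [8, 12])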
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a =
Tensor([1, 2])
megengine.tensor.Tensor
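megengine.tensor.Tensor, completed above, is the basic eager array type used throughout these rows. A short sketch (values illustrative):

import numpy as np
from megengine.tensor import Tensor

t1 = Tensor([1, 2])                                       # from a Python list
t2 = Tensor(np.arange(4).reshape(2, 2), dtype="float32")  # from a numpy array
print(t1.numpy())        # -> [1 2]
print(t2.numpy().shape)  # -> (2, 2)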
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b =
Tensor([3, 4])
megengine.tensor.Tensor
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @
trace(symbolic=True, capture_as_const=True)
megengine.jit.tracing.trace
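The trace decorator completed above is what turns the plain Python functions in these rows into dumpable static graphs. A minimal sketch of the pattern the prompts repeat (the names fwd, x, and y are illustrative):

import io
from megengine.jit.tracing import trace
from megengine.tensor import Tensor

@trace(symbolic=True, capture_as_const=True)  # build a symbolic graph; fold params as constants
def fwd(x):
    return x * 2

fwd(Tensor([1, 2]))  # the first call records the trace

model = io.BytesIO()
fwd.dump(model, arg_names=["x"], output_names=["y"], optimize_for_inference=False)
model.seek(0)        # rewind before handing the buffer to Net.load or GraphInference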
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph =
Net.load(orig_model)
megengine.utils.network.Network.load
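Network.load, completed above, deserializes a dumped graph into an editable Network, in contrast to GraphInference, which only executes it. A sketch assuming model is the BytesIO produced in the trace/dump sketch above (so the dump used arg_names=["x"]):

from megengine.utils.network import Network as Net

net = Net.load(model)                        # editable graph, not just runnable
print(net.metadata)                          # dump-time metadata, or None if disabled
varx = net.var_filter.name("x").as_unique()  # look up a variable by its dumped name
opr = net.opr_filter.has_input(varx).as_unique()  # the operator consuming it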
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 =
F.sub(vara, varb)
megengine.functional.sub
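megengine.functional.sub is plain elementwise subtraction, equivalent to the - operator on tensors; with the a and b used in these rows it yields [-2, -2], which relu then clamps to [0, 0], hence the test's expected output. A one-liner sketch:

import megengine.functional as F
from megengine.tensor import Tensor

d = F.sub(Tensor([1, 2]), Tensor([3, 4]))
print(d.numpy())  # -> [-2 -2]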
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 =
F.relu(out1)
megengine.functional.relu
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph =
GraphInference(modified_model1)
megengine.utils.comp_graph_tools.GraphInference
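This row completes test_replace_opr, which swaps a whole operator (opr -> opr) rather than a variable (var -> var) as test_replace_var did. The recipe distilled into a self-contained sketch, in the same order as the prompt (add_dep_oprs first, then locate the original operator):

import io
import numpy as np
import megengine.functional as F
from megengine.jit.tracing import trace
from megengine.tensor import Tensor
from megengine.utils.comp_graph_tools import GraphInference
from megengine.utils.network import Network as Net

@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
    return (a + b) * 2

a, b = Tensor([1, 2]), Tensor([3, 4])
fwd(a, b)
buf = io.BytesIO()
fwd.dump(buf, arg_names=["a", "b"], output_names="o", optimize_for_inference=False)
buf.seek(0)

net = Net.load(buf)
vara = net.var_filter.name("a").as_unique()
varb = net.var_filter.name("b").as_unique()
new_out = net.add_dep_oprs(F.relu(F.sub(vara, varb)))  # materialize the new oprs in the graph
old_opr = net.opr_filter.has_input(vara).as_unique()   # the original ADD
net.replace_oprs({old_opr: new_out[0].owner})          # opr -> opr mapping

out_buf = io.BytesIO()
net.dump(out_buf)
out_buf.seek(0)
np.testing.assert_equal(GraphInference(out_buf).run(a, b)["o"], [0, 0])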
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x =
F.ones((2,))
megengine.functional.ones
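F.ones, completed above, allocates a tensor of ones with a given shape tuple; test_splice_network uses it only to synthesize inputs. A sketch (shapes illustrative):

import megengine.functional as F

v = F.ones((2,))    # shape (2,): [1. 1.]
m = F.ones((2, 3))  # any shape tuple works
print(v.numpy(), m.numpy().shape)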
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y =
F.ones((2,))
megengine.functional.ones
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y = F.ones((2,)) @
trace(symbolic=True, capture_as_const=True)
megengine.jit.tracing.trace
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y = F.ones((2,)) @trace(symbolic=True, capture_as_const=True) def fun1(a, b): return (a + b) * 2 @
trace(symbolic=True, capture_as_const=True)
megengine.jit.tracing.trace
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y = F.ones((2,)) @trace(symbolic=True, capture_as_const=True) def fun1(a, b): return (a + b) * 2 @trace(symbolic=True, capture_as_const=True) def fun2(a): return a * 2 - 1 model = io.BytesIO() fun1(x, y) fun2(x) fun1.dump( model, arg_names=["net1_i0", "net1_i1"], output_names=["net1_o0"], optimize_for_inference=False, ) model.seek(0) net1 =
Net.load(model)
megengine.utils.network.Network.load
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y = F.ones((2,)) @trace(symbolic=True, capture_as_const=True) def fun1(a, b): return (a + b) * 2 @trace(symbolic=True, capture_as_const=True) def fun2(a): return a * 2 - 1 model = io.BytesIO() fun1(x, y) fun2(x) fun1.dump( model, arg_names=["net1_i0", "net1_i1"], output_names=["net1_o0"], optimize_for_inference=False, ) model.seek(0) net1 = Net.load(model) model.seek(0) fun2.dump( model, arg_names=["net2_i0"], output_names=["net2_o0"], optimize_for_inference=False, ) model.seek(0) net2 =
Net.load(model)
megengine.utils.network.Network.load
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y = F.ones((2,)) @trace(symbolic=True, capture_as_const=True) def fun1(a, b): return (a + b) * 2 @trace(symbolic=True, capture_as_const=True) def fun2(a): return a * 2 - 1 model = io.BytesIO() fun1(x, y) fun2(x) fun1.dump( model, arg_names=["net1_i0", "net1_i1"], output_names=["net1_o0"], optimize_for_inference=False, ) model.seek(0) net1 = Net.load(model) model.seek(0) fun2.dump( model, arg_names=["net2_i0"], output_names=["net2_o0"], optimize_for_inference=False, ) model.seek(0) net2 = Net.load(model) net1.add_output(*net2.output_vars) var = net1.var_filter.name("net1_i0").as_unique() repl_var = net2.var_filter.name("net2_o0").as_unique() net1.replace_vars({var: repl_var}) assert "net1_i0" not in [var.name for var in net1.all_vars] assert "net2_i0" in [var.name for var in net1.all_vars] model.seek(0) net1.dump(model, keep_var_name=2, optimize_for_inference=False) model.seek(0) net =
Net.load(model)
megengine.utils.network.Network.load
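The rows in this stretch walk test_splice_network: two independently dumped graphs are loaded and stitched together by replacing one network's input variable with the other network's output. The core moves, distilled from the prompt; buf is a placeholder name, and net1/net2 are assumed to be Networks loaded from dumps that used the names net1_i0 and net2_o0:

import io

net1.add_output(*net2.output_vars)                 # pull net2's graph into net1
inp = net1.var_filter.name("net1_i0").as_unique()  # net1's former input
rep = net2.var_filter.name("net2_o0").as_unique()  # net2's output
net1.replace_vars({inp: rep})                      # net1 now consumes net2_o0

buf = io.BytesIO()
net1.dump(buf, keep_var_name=2, optimize_for_inference=False)  # keep_var_name=2 preserves all variable names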
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y = F.ones((2,)) @trace(symbolic=True, capture_as_const=True) def fun1(a, b): return (a + b) * 2 @trace(symbolic=True, capture_as_const=True) def fun2(a): return a * 2 - 1 model = io.BytesIO() fun1(x, y) fun2(x) fun1.dump( model, arg_names=["net1_i0", "net1_i1"], output_names=["net1_o0"], optimize_for_inference=False, ) model.seek(0) net1 = Net.load(model) model.seek(0) fun2.dump( model, arg_names=["net2_i0"], output_names=["net2_o0"], optimize_for_inference=False, ) model.seek(0) net2 = Net.load(model) net1.add_output(*net2.output_vars) var = net1.var_filter.name("net1_i0").as_unique() repl_var = net2.var_filter.name("net2_o0").as_unique() net1.replace_vars({var: repl_var}) assert "net1_i0" not in [var.name for var in net1.all_vars] assert "net2_i0" in [var.name for var in net1.all_vars] model.seek(0) net1.dump(model, keep_var_name=2, optimize_for_inference=False) model.seek(0) net = Net.load(model) assert "net1_i0" not in [var.name for var in net.all_vars] assert "net2_i0" in [var.name for var in net.all_vars] def test_modify_params(): a =
Tensor([1, 2])
megengine.tensor.Tensor
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y = F.ones((2,)) @trace(symbolic=True, capture_as_const=True) def fun1(a, b): return (a + b) * 2 @trace(symbolic=True, capture_as_const=True) def fun2(a): return a * 2 - 1 model = io.BytesIO() fun1(x, y) fun2(x) fun1.dump( model, arg_names=["net1_i0", "net1_i1"], output_names=["net1_o0"], optimize_for_inference=False, ) model.seek(0) net1 = Net.load(model) model.seek(0) fun2.dump( model, arg_names=["net2_i0"], output_names=["net2_o0"], optimize_for_inference=False, ) model.seek(0) net2 = Net.load(model) net1.add_output(*net2.output_vars) var = net1.var_filter.name("net1_i0").as_unique() repl_var = net2.var_filter.name("net2_o0").as_unique() net1.replace_vars({var: repl_var}) assert "net1_i0" not in [var.name for var in net1.all_vars] assert "net2_i0" in [var.name for var in net1.all_vars] model.seek(0) net1.dump(model, keep_var_name=2, optimize_for_inference=False) model.seek(0) net = Net.load(model) assert "net1_i0" not in [var.name for var in net.all_vars] assert "net2_i0" in [var.name for var in net.all_vars] def test_modify_params(): a = Tensor([1, 2]) b =
Tensor([3, 4])
megengine.tensor.Tensor
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y = F.ones((2,)) @trace(symbolic=True, capture_as_const=True) def fun1(a, b): return (a + b) * 2 @trace(symbolic=True, capture_as_const=True) def fun2(a): return a * 2 - 1 model = io.BytesIO() fun1(x, y) fun2(x) fun1.dump( model, arg_names=["net1_i0", "net1_i1"], output_names=["net1_o0"], optimize_for_inference=False, ) model.seek(0) net1 = Net.load(model) model.seek(0) fun2.dump( model, arg_names=["net2_i0"], output_names=["net2_o0"], optimize_for_inference=False, ) model.seek(0) net2 = Net.load(model) net1.add_output(*net2.output_vars) var = net1.var_filter.name("net1_i0").as_unique() repl_var = net2.var_filter.name("net2_o0").as_unique() net1.replace_vars({var: repl_var}) assert "net1_i0" not in [var.name for var in net1.all_vars] assert "net2_i0" in [var.name for var in net1.all_vars] model.seek(0) net1.dump(model, keep_var_name=2, optimize_for_inference=False) model.seek(0) net = Net.load(model) assert "net1_i0" not in [var.name for var in net.all_vars] assert "net2_i0" in [var.name for var in net.all_vars] def test_modify_params(): a = Tensor([1, 2]) b = Tensor([3, 4]) @
trace(symbolic=True, capture_as_const=True)
megengine.jit.tracing.trace
import io import numpy as np import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.module as M import megengine.utils.network_node as N from megengine.jit.tracing import trace from megengine.tensor import Tensor from megengine.utils.comp_graph_tools import GraphInference from megengine.utils.network import Network as Net from megengine.utils.network import as_oprnode, set_symbolic_shape from megengine.utils.network_node import Host2DeviceCopy, VarNode def test_metadata(): x = Tensor(0) @trace(symbolic=True, capture_as_const=True) def fwd(x): return x * 2 fwd(x) orig_model = io.BytesIO() fwd.dump(orig_model, user_info="test", optimize_for_inference=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": "test", "graph_modified": False, # False: tracing.dump "optimized_for_inference": False, } orig_model.seek(0) graph.dump( orig_model, user_info={"str": "x", "tensor": x, "module": M.Module, "none": None}, optimize_for_inference=True, enable_nchw4=True, enable_ioc16=True, ) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata == { "user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None}, "graph_modified": True, # True: Network.dump "optimized_for_inference": True, "enable_nchw4": True, "enable_ioc16": True, } orig_model.seek(0) fwd.dump(orig_model, enable_metadata=False) orig_model.seek(0) graph = Net.load(orig_model) assert graph.metadata is None def test_replace_var(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out = F.mul(vara, varb) out = F.relu(out) opnode = list(graph.opr_filter.has_input(vara)) repl_dict = {opnode[0].outputs[0]: out} graph.replace_vars(repl_dict) modified_model = io.BytesIO() graph.dump(modified_model) modified_model.seek(0) load_graph = GraphInference(modified_model) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [6, 16]) def test_replace_opr(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph = Net.load(orig_model) vara = graph.var_filter.name("a").as_unique() varb = graph.var_filter.name("b").as_unique() out1 = F.sub(vara, varb) out1 = F.relu(out1) out1 = graph.add_dep_oprs(out1) orig_opr = graph.opr_filter.has_input(vara).as_unique() repl_dict = {orig_opr: out1[0].owner} graph.replace_oprs(repl_dict) modified_model1 = io.BytesIO() graph.dump(modified_model1) modified_model1.seek(0) load_graph = GraphInference(modified_model1) out = load_graph.run(a, b) np.testing.assert_equal(out["o"], [0, 0]) def test_splice_network(): x = F.ones((2,)) y = F.ones((2,)) @trace(symbolic=True, capture_as_const=True) def fun1(a, b): return (a + b) * 2 @trace(symbolic=True, capture_as_const=True) def fun2(a): return a * 2 - 1 model = io.BytesIO() fun1(x, y) fun2(x) fun1.dump( model, arg_names=["net1_i0", "net1_i1"], output_names=["net1_o0"], optimize_for_inference=False, ) model.seek(0) net1 = Net.load(model) model.seek(0) fun2.dump( model, arg_names=["net2_i0"], output_names=["net2_o0"], optimize_for_inference=False, ) model.seek(0) net2 = Net.load(model) net1.add_output(*net2.output_vars) var = net1.var_filter.name("net1_i0").as_unique() repl_var = net2.var_filter.name("net2_o0").as_unique() net1.replace_vars({var: repl_var}) assert "net1_i0" not in [var.name for var in net1.all_vars] assert "net2_i0" in [var.name for var in net1.all_vars] model.seek(0) net1.dump(model, keep_var_name=2, optimize_for_inference=False) model.seek(0) net = Net.load(model) assert "net1_i0" not in [var.name for var in net.all_vars] assert "net2_i0" in [var.name for var in net.all_vars] def test_modify_params(): a = Tensor([1, 2]) b = Tensor([3, 4]) @trace(symbolic=True, capture_as_const=True) def fwd(a, b): return (a + b) * 2 fwd(a, b) orig_model = io.BytesIO() fwd.dump( orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False ) orig_model.seek(0) graph =
completion: Net.load(orig_model)
api: megengine.utils.network.Network.load
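Read as a record, the pair above is a code prefix cut mid-statement plus the expression that completes it: concatenating the two fields yields compilable source again. A minimal sketch of that reassembly; the "prompt" literal below is an abbreviated stand-in for the full field above, not the real record:

record = {
    "prompt": "    orig_model.seek(0)\n    graph = ",  # code prefix, cut mid-assignment
    "completion": "Net.load(orig_model)",              # expression that completes it
    "api": "megengine.utils.network.Network.load",     # fully qualified API being called
}
# Rejoining prompt and completion restores the original statement.
full_source = record["prompt"] + record["completion"]
assert full_source.endswith("graph = Net.load(orig_model)")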
prompt:
(verbatim repeat of the previous record's prompt, resumed at its cut point and extended with:)
    graph = Net.load(orig_model)
    param_const = graph.params_filter.as_unique()
    param_const.set_value(3)
    modified_model = io.BytesIO()
    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph =
completion: GraphInference(modified_model)
api: megengine.utils.comp_graph_tools.GraphInference
prompt:
(verbatim repeat of the previous record's prompt, resumed at its cut point and extended with:)
    load_graph = GraphInference(modified_model)
    out = load_graph.run(a, b)
    np.testing.assert_equal(out["o"], [12, 18])


def test_make_const():
    a =
completion: Tensor([1, 2])
api: megengine.tensor.Tensor
prompt:
(verbatim repeat of the previous record's prompt, resumed at its cut point and extended with:)
    a = Tensor([1, 2])
    b =
completion: Tensor([3, 4])
api: megengine.tensor.Tensor
prompt:
(verbatim repeat of the previous record's prompt, resumed at its cut point and extended with:)
    b = Tensor([3, 4])

    @
completion: trace(symbolic=True, capture_as_const=True)
api: megengine.jit.tracing.trace
prompt:
(verbatim repeat of the previous record's prompt, resumed at its cut point and extended with:)
    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(
        orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
    )
    orig_model.seek(0)
    graph =
completion: Net.load(orig_model)
api: megengine.utils.network.Network.load
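A consumer of records like these may want to check that each api field resolves to the object the completion actually calls. A hedged sketch of one way to do that: import the longest importable module prefix of the dotted path, then walk the remaining attributes. resolve() is a hypothetical helper written for this dump, not part of MegEngine:

import importlib

def resolve(path: str):
    # Try the longest importable module prefix, then getattr() the rest, so
    # "megengine.utils.network.Network.load" imports megengine.utils.network
    # and walks Network -> load.
    parts = path.split(".")
    for i in range(len(parts), 0, -1):
        try:
            obj = importlib.import_module(".".join(parts[:i]))
        except ImportError:
            continue
        for name in parts[i:]:
            obj = getattr(obj, name)
        return obj
    raise ImportError(path)

# e.g. resolve("megengine.tensor.Tensor") should return the Tensor class
# used throughout the prompts above.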