whisperkittools-a8c3cdeab8da5d76a7b952aa74ffebfbcd44804b generated files: openai_whisper-tiny.en
Changed files:
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mil (+1 -1)
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/weights/weight.bin (+1 -1)
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/model.mil (+1 -1)
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/weights/weight.bin (+1 -1)
- openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil (+1 -1)
- openai_whisper-tiny.en/TextDecoder.mlmodelc/weights/weight.bin (+1 -1)
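Each weight.bin in this commit is a Git LFS pointer whose oid field previously carried no digest and now records the sha256 of the actual payload, alongside the payload size in bytes. Below is a minimal sketch (Python; the verify_lfs_pointer helper and the file paths are illustrative, not part of the repo) of how such a pointer can be checked against a separately fetched blob:

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Parse a Git LFS pointer file and check the blob's sha256 digest and size."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].removeprefix("sha256:").strip()
    expected_size = int(fields["size"])

    blob = Path(blob_path).read_bytes()
    return (
        hashlib.sha256(blob).hexdigest() == expected_oid
        and len(blob) == expected_size
    )

# Illustrative paths: the first is the committed pointer text, the second a
# locally downloaded copy of the real weight payload.
ok = verify_lfs_pointer(
    "openai_whisper-tiny.en/AudioEncoder.mlmodelc/weights/weight.bin",
    "downloads/AudioEncoder_weight.bin",
)
print("pointer matches blob:", ok)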
openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mil
CHANGED
@@ -1,5 +1,5 @@
 program(1.0)
-[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.3.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
 {
     func main<ios16>(tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features) {
         tensor<int32, [2]> var_34 = const()[name = tensor<string, []>("op_34"), val = tensor<int32, [2]>([1, 1])];
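The hunk above shows the encoder's MIL signature: a single fp16 input named melspectrogram_features with shape [1, 80, 1, 3000]. A minimal sketch of exercising the compiled bundle on macOS, assuming an installed coremltools version that provides the CompiledMLModel loader for .mlmodelc bundles (the input name comes from the signature above; everything else is illustrative):

import numpy as np
import coremltools as ct

# Load the already-compiled Core ML bundle directly; no recompilation step.
encoder = ct.models.CompiledMLModel(
    "openai_whisper-tiny.en/AudioEncoder.mlmodelc",
    compute_units=ct.ComputeUnit.CPU_AND_NE,
)

# Dummy mel spectrogram matching the fp16 [1, 80, 1, 3000] signature above.
# float32 is also commonly accepted by predict(); cast if your version requires it.
mel = np.zeros((1, 80, 1, 3000), dtype=np.float16)

outputs = encoder.predict({"melspectrogram_features": mel})
for name, value in outputs.items():
    print(name, getattr(value, "shape", type(value)))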
openai_whisper-tiny.en/AudioEncoder.mlmodelc/weights/weight.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ed7afb1e0f6ba369dfecda9108c2892b2498cd62e8e96ce10714910c421e64eb
 size 16422784
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/model.mil
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
program(1.0)
|
2 |
-
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "
|
3 |
{
|
4 |
func main<ios16>(tensor<fp16, [480000]> audio) {
|
5 |
tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
|
|
|
1 |
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.3.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
{
|
4 |
func main<ios16>(tensor<fp16, [480000]> audio) {
|
5 |
tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
|
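The MelSpectrogram model consumes a flat fp16 tensor of 480,000 samples, i.e. exactly 30 s of 16 kHz mono audio (30 × 16,000 = 480,000), which presumably feeds the [1, 80, 1, 3000] melspectrogram_features input of the AudioEncoder above. A sketch of padding or trimming an arbitrary clip to that window, under the same coremltools assumptions as before (audio loading itself is left out):

import numpy as np
import coremltools as ct

SAMPLE_RATE = 16_000
WINDOW_SAMPLES = 30 * SAMPLE_RATE  # 480_000, matching the fp16 [480000] input above

def to_fixed_window(samples: np.ndarray) -> np.ndarray:
    """Zero-pad or trim a mono clip so it is exactly 30 s at 16 kHz."""
    samples = samples[:WINDOW_SAMPLES]
    if samples.shape[0] < WINDOW_SAMPLES:
        samples = np.pad(samples, (0, WINDOW_SAMPLES - samples.shape[0]))
    return samples.astype(np.float16)

mel_model = ct.models.CompiledMLModel("openai_whisper-tiny.en/MelSpectrogram.mlmodelc")

audio = to_fixed_window(np.random.randn(10 * SAMPLE_RATE))  # a fake 10 s clip
features = mel_model.predict({"audio": audio})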
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/weights/weight.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:be7067141d5c13b3a805a77ca5a9ee783328932b9c0add16b28a22fd7963897c
 size 354080
openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil
CHANGED
@@ -1,5 +1,5 @@
 program(1.0)
-[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.3.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
 {
     func main<ios16>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, tensor<fp16, [1, 384, 1, 1500]> encoder_output_embeds, tensor<int32, [1]> input_ids, tensor<fp16, [1, 1536, 1, 448]> key_cache, tensor<fp16, [1, 448]> kv_cache_update_mask, tensor<fp16, [1, 1536, 1, 448]> value_cache) {
         tensor<int32, []> var_24_axis_0 = const()[name = tensor<string, []>("op_24_axis_0"), val = tensor<int32, []>(0)];
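The decoder hunk lists seven inputs: the current token (input_ids, int32 [1]), the number of cached positions (cache_length, int32 [1]), two masks over the 448-token context (decoder_key_padding_mask and kv_cache_update_mask, fp16 [1, 448]), the encoder output (encoder_output_embeds, fp16 [1, 384, 1, 1500]), and the key/value caches (fp16 [1, 1536, 1, 448]). A sketch of a single decode step with placeholder values; the mask conventions and the start-token id are assumptions for illustration, not WhisperKit's actual decoding loop:

import numpy as np
import coremltools as ct

decoder = ct.models.CompiledMLModel("openai_whisper-tiny.en/TextDecoder.mlmodelc")

CONTEXT = 448  # maximum decoded positions, per the cache shapes in the signature above

def decode_step(token_id: int, step: int, encoder_output: np.ndarray,
                key_cache: np.ndarray, value_cache: np.ndarray) -> dict:
    """Run one decoder step; masks use 0 for 'attend' and a large negative
    value for 'ignore', a common convention but an assumption here."""
    padding_mask = np.full((1, CONTEXT), -1e4, dtype=np.float16)
    padding_mask[:, : step + 1] = 0.0   # attend to positions decoded so far
    update_mask = np.zeros((1, CONTEXT), dtype=np.float16)
    update_mask[:, step] = 1.0          # write the new KV entry at this slot

    return decoder.predict({
        "input_ids": np.array([token_id], dtype=np.int32),
        "cache_length": np.array([step], dtype=np.int32),
        "decoder_key_padding_mask": padding_mask,
        "kv_cache_update_mask": update_mask,
        "encoder_output_embeds": encoder_output,   # fp16 [1, 384, 1, 1500]
        "key_cache": key_cache,                    # fp16 [1, 1536, 1, 448]
        "value_cache": value_cache,                # fp16 [1, 1536, 1, 448]
    })

# Illustrative first step with zero-initialized caches and a placeholder start token.
kv_shape = (1, 1536, 1, CONTEXT)
out = decode_step(
    token_id=50257,  # placeholder start-of-transcript id, not verified against the tokenizer
    step=0,
    encoder_output=np.zeros((1, 384, 1, 1500), dtype=np.float16),
    key_cache=np.zeros(kv_shape, dtype=np.float16),
    value_cache=np.zeros(kv_shape, dtype=np.float16),
)
print(sorted(out.keys()))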
openai_whisper-tiny.en/TextDecoder.mlmodelc/weights/weight.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a18097f2baf8507103ef5319af13ffea77cfa8ee5170e01cee84a63336b65fd7
 size 59215664
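Summing the size fields of the three LFS pointers above gives the total weight payload for this variant: 16,422,784 + 354,080 + 59,215,664 = 75,992,528 bytes, roughly 76.0 MB (about 72.5 MiB). A one-liner to reproduce the tally:

sizes = {"AudioEncoder": 16_422_784, "MelSpectrogram": 354_080, "TextDecoder": 59_215_664}
total = sum(sizes.values())
print(total, f"{total / 1e6:.1f} MB", f"{total / 2**20:.1f} MiB")  # 75992528 76.0 MB 72.5 MiB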