diff --git a/examples/end2end_tfkeras.py b/examples/end2end_tfkeras.py
index 19da4d3..1150f1b 100644
--- a/examples/end2end_tfkeras.py
+++ b/examples/end2end_tfkeras.py
@@ -56,7 +56,7 @@ print(proc.stderr.decode('ascii'))
########################################
# Runs onnxruntime.
-session = InferenceSession("simple_rnn.onnx")
+session = InferenceSession("simple_rnn.onnx", providers=['CPUExecutionProvider'])
got = session.run(None, {'input_1': input})
print(got[0])
diff --git a/examples/getting_started.py b/examples/getting_started.py
index d00ea03..25bc3bc 100644
--- a/examples/getting_started.py
+++ b/examples/getting_started.py
@@ -27,7 +27,7 @@ print("Tensorflow result")
print(f(a_val, b_val).numpy())
print("ORT result")
-sess = ort.InferenceSession(onnx_model.SerializeToString())
+sess = ort.InferenceSession(onnx_model.SerializeToString(), providers=['CPUExecutionProvider'])
res = sess.run(None, {'a': a_val, 'b': b_val})
print(res[0])
@@ -46,7 +46,7 @@ print("Keras result")
print(model(x_val).numpy())
print("ORT result")
-sess = ort.InferenceSession(onnx_model.SerializeToString())
+sess = ort.InferenceSession(onnx_model.SerializeToString(), providers=['CPUExecutionProvider'])
res = sess.run(None, {'x': x_val})
print(res[0])
@@ -57,7 +57,7 @@ model.save("savedmodel")
os.system("python -m tf2onnx.convert --saved-model savedmodel --output model.onnx --opset 13")
print("ORT result")
-sess = ort.InferenceSession("model.onnx")
+sess = ort.InferenceSession("model.onnx", providers=['CPUExecutionProvider'])
res = sess.run(None, {'dense_input': x_val})
print(res[0])
diff --git a/tests/test_einsum_helper.py b/tests/test_einsum_helper.py
index 9ecb5c4..05c9fe3 100644
--- a/tests/test_einsum_helper.py
+++ b/tests/test_einsum_helper.py
@@ -27,7 +27,7 @@ class TestEinsum(Tf2OnnxBackendTestBase):
def apply_einsum_sequence(self, seq, *inputs):
names = ["X%d" % i for i in range(len(inputs))]
onx = seq.to_onnx('Y', *names, opset=self.config.opset)
- sess = InferenceSession(onx.SerializeToString())
+ sess = InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
inps = {n: i.astype(np.float32) for n, i in zip(names, inputs)}
res = sess.run(None, inps)
return res[0]
diff --git a/tests/test_einsum_optimizers.py b/tests/test_einsum_optimizers.py
index bd90131..55d1807 100644
--- a/tests/test_einsum_optimizers.py
+++ b/tests/test_einsum_optimizers.py
@@ -94,8 +94,8 @@ class EinsumOptimizerTests(Tf2OnnxBackendTestBase):
new_model_proto = self.run_einsum_compare(["Y"], feed_dict, model_proto,
catch_errors=catch_errors)
- sess1 = InferenceSession(model_proto.SerializeToString())
- sess2 = InferenceSession(new_model_proto.SerializeToString())
+ sess1 = InferenceSession(model_proto.SerializeToString(), providers=['CPUExecutionProvider'])
+ sess2 = InferenceSession(new_model_proto.SerializeToString(), providers=['CPUExecutionProvider'])
got1 = sess1.run(None, feed_dict)
got2 = sess2.run(None, feed_dict)
assert_almost_equal(got1, got2)
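
The patch applies the same one-line change at every call site: pass an explicit providers list to InferenceSession. Since onnxruntime 1.9, builds that bundle more than one execution provider (e.g. the GPU package) raise an error when providers is omitted, and pinning CPUExecutionProvider keeps these examples and tests deterministic across build flavors. Below is a minimal, self-contained sketch of the updated call pattern; the tiny Add model, its tensor names, and the graph name are illustrative only and do not come from the patched files (opset 13 matches the opset used in getting_started.py).

# A minimal sketch of the call pattern this patch applies. The Add model
# built here is illustrative only; it is not one of the patched files.
import numpy as np
import onnx
import onnxruntime as ort
from onnx import TensorProto, helper

# Build a throwaway one-node model: y = a + b.
node = helper.make_node("Add", ["a", "b"], ["y"])
graph = helper.make_graph(
    [node], "add_graph",
    [helper.make_tensor_value_info("a", TensorProto.FLOAT, [2]),
     helper.make_tensor_value_info("b", TensorProto.FLOAT, [2])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [2])])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])

# Explicit providers: since onnxruntime 1.9, builds that ship more than one
# execution provider raise an error if this argument is omitted.
sess = ort.InferenceSession(model.SerializeToString(),
                            providers=['CPUExecutionProvider'])
res = sess.run(None, {'a': np.array([1.0, 2.0], np.float32),
                      'b': np.array([3.0, 4.0], np.float32)})
print(res[0])  # [4. 6.]

If a model should instead use whatever hardware is present, a common alternative is to pass ort.get_available_providers() (or a filtered subset of it) rather than hard-coding the CPU provider.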