Merge pull request #787 from XJDKC/keep-order
Fix the graph operation when a tensor is written by multiple independent ops
diff --git a/.github/workflows/macOS.yaml b/.github/workflows/macOS.yaml
index 795beb6..d38ce32 100644
--- a/.github/workflows/macOS.yaml
+++ b/.github/workflows/macOS.yaml
@@ -31,17 +31,26 @@
with:
python-version: "3.7"
- name: install-build-dependencies
- run: brew install protobuf swig opencv glog lmdb numpy && pip3 install numpy \
- && wget https://github.com/oneapi-src/oneDNN/releases/download/v1.1.3/dnnl_mac_1.1.3_cpu_iomp.tgz -P /tmp && tar zxf /tmp/dnnl_mac_1.1.3_cpu_iomp.tgz -C .
+ run: |
+ brew install protobuf swig opencv glog lmdb numpy
+ pip3 install numpy && wget https://github.com/oneapi-src/oneDNN/releases/download/v1.2/dnnl_mac_1.2.0_cpu_tbb.tgz -P /tmp
+ tar zxf /tmp/dnnl_mac_1.2.0_cpu_tbb.tgz -C /tmp
- name: configure
run: mkdir build && cd build && cmake -DUSE_PYTHON3=YES -DENABLE_TEST=YES -DUSE_DNNL=YES ..
env:
CMAKE_INCLUDE_PATH: /usr/local/opt/openblas/include:$CMAKE_INCLUDE_PATH
CMAKE_LIBRARY_PATH: /usr/local/opt/openblas/lib:$CMAKE_LIBRARY_PATH
- DNNL_ROOT: `pwd`/dnnl_mac_1.1.3_cpu_iomp
+ DNNL_ROOT: /tmp/dnnl_mac_1.2.0_cpu_tbb/
- name: build
run: cd build && make
env:
CXXFLAGS: -I /Users/runner/hostedtoolcache/Python/3.7.8/x64/lib/python3.7/site-packages/numpy/core/include $CXXFLAGS
+ LD_LIBRARY_PATH: /usr/local/opt/openblas/lib:/tmp/dnnl_mac_1.2.0_cpu_tbb/lib:$LD_LIBRARY_PATH
- name: C++ test
- run: build/bin/test_singa
\ No newline at end of file
+ run: |
+ brew install tbb
+ install_name_tool -change libdnnl.1.dylib /tmp/dnnl_mac_1.2.0_cpu_tbb/lib/libdnnl.1.dylib /Users/runner/work/singa/singa/build/lib/libsinga.dylib
+ install_name_tool -change libdnnl.1.dylib /tmp/dnnl_mac_1.2.0_cpu_tbb/lib/libdnnl.1.dylib build/bin/test_singa
+ build/bin/test_singa
+ env:
+ LD_LIBRARY_PATH: /usr/local/opt/openblas/lib:/tmp/dnnl_mac_1.2.0_cpu_tbb/lib:$LD_LIBRARY_PATH
diff --git a/examples/rnn/imdb_data.py b/examples/rnn/imdb_data.py
index 2511027..973f9e5 100644
--- a/examples/rnn/imdb_data.py
+++ b/examples/rnn/imdb_data.py
@@ -36,6 +36,7 @@
imdb_dataset_link = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
google_news_pretrain_embeddings_link = "https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz"
+
def pad_batch(b, seq_limit):
''' convert a batch of encoded sequence
to pretrained word vectors from the embed weights (lookup dictionary)
@@ -63,6 +64,7 @@
batch_seq = np.array(batch_seq).astype(np.int32)
return batch_seq, batch_senti_onehot, batch_senti
+
def pad_batch_2vec(b, seq_limit, embed_weights):
''' convert a batch of encoded sequence
to pretrained word vectors from the embed weights (lookup dictionary)
@@ -123,7 +125,7 @@
def remove_special_characters(text, remove_digits=True):
''' lambda fn for removing special char '''
- pattern = r'[^a-zA-z0-9\s]'
+ pattern = r'[^a-zA-Z0-9\s]'
text = re.sub(pattern, '', text)
return text
@@ -183,7 +185,7 @@
data_dir = unzip_data(download_dir, data_gz)
# imdb dirs
- vocab_f = data_dir + '/imdb.vocab'
+ # vocab_f = data_dir + '/imdb.vocab'
train_pos_dir = data_dir + '/train/pos/'
train_neg_dir = data_dir + '/train/neg/'
test_pos_dir = data_dir + '/test/pos/'
@@ -205,7 +207,9 @@
(test_pos_dir, 1), (test_neg_dir, 0)]:
for filename in os.listdir(data_dir):
if filename.endswith(".txt"):
- with open(os.path.join(data_dir, filename), "r", encoding="utf-8") as fhdl:
+ with open(os.path.join(data_dir, filename),
+ "r",
+ encoding="utf-8") as fhdl:
data.append((fhdl.read(), label))
# text review cleaning
diff --git a/python/singa/device.py b/python/singa/device.py
index 3c8f8fc..cfc3eb8 100644
--- a/python/singa/device.py
+++ b/python/singa/device.py
@@ -22,7 +22,7 @@
TODO(wangwei) implement py CudaGPU class.
'''
-from builtins import object
+# from builtins import object
from . import singa_wrap as singa