
test_python_tokenizer.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing PythonTokenizer op in DE
"""
import mindspore.dataset as ds
import mindspore.dataset.text as text
from mindspore import log as logger

# Input text file; expected to contain the lines asserted below, one sentence per line.
DATA_FILE = "../data/dataset/testTokenizerData/1.txt"


def test_whitespace_tokenizer_ch():
    """
    Test PythonTokenizer
    """
    whitespace_strs = [["Welcome", "to", "Beijing!"],
                       ["北京欢迎您!"],
                       ["我喜欢English!"],
                       [""]]

    def my_tokenizer(line):
        # Split on whitespace; keep a single empty token for blank lines.
        words = line.split()
        if not words:
            return [""]
        return words

    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    tokenizer = text.PythonTokenizer(my_tokenizer)
    dataset = dataset.map(operations=tokenizer, num_parallel_workers=1)
    tokens = []
    for i in dataset.create_dict_iterator():
        # Convert the returned tensor back to Python strings for comparison.
        s = text.to_str(i['text']).tolist()
        tokens.append(s)
    logger.info("The out tokens is : {}".format(tokens))
    assert whitespace_strs == tokens


if __name__ == '__main__':
    test_whitespace_tokenizer_ch()
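
As a quick, self-contained sanity check of the tokenizer callable used above (plain Python, no MindSpore required; the sample strings mirror the expected values in whitespace_strs):

def my_tokenizer(line):
    words = line.split()
    if not words:
        return [""]
    return words

# str.split() returns an empty list for a blank line, so the fallback keeps a single empty token.
assert my_tokenizer("Welcome to Beijing!") == ["Welcome", "to", "Beijing!"]
assert my_tokenizer("北京欢迎您!") == ["北京欢迎您!"]  # no whitespace, kept as one token
assert my_tokenizer("") == [""]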