Word and sentence tokenizer¶
This tutorial is available as an IPython notebook at Malaya/example/tokenizer.
[1]:
%%time
import malaya
CPU times: user 5.91 s, sys: 1.12 s, total: 7.03 s
Wall time: 7.62 s
/Users/huseinzolkepli/Documents/Malaya/malaya/preprocessing.py:259: FutureWarning: Possible nested set at position 2289
self.tok = re.compile(r'({})'.format('|'.join(pipeline)))
[2]:
string1 = 'xjdi ke, y u xsuke makan HUSEIN kt situ tmpt, i hate it. pelikle, pada'
string2 = 'i mmg2 xske mknn HUSEIN kampng tmpat, i love them. pelikle saye'
string3 = 'perdana menteri ke11 sgt suka makn ayam, harganya cuma rm15.50'
string4 = 'pada 10/4, kementerian mengumumkan, 1/100'
string5 = 'Husein Zolkepli dapat tempat ke-12 lumba lari hari ni'
string6 = 'Husein Zolkepli (2011 - 2019) adalah ketua kampng di kedah sekolah King Edward ke-IV'
string7 = '2jam 30 minit aku tunggu kau, 60.1 kg kau ni, suhu harini 31.2c, aku dahaga minum 600ml'
Load word tokenizer¶
class Tokenizer:
def __init__(self, lowercase = False, **kwargs):
"""
Load Tokenizer object.
Check supported regex pattern at https://github.com/huseinzol05/Malaya/blob/master/malaya/text/regex.py#L85
Parameters
----------
lowercase: bool, optional (default=False)
lowercase tokens.
emojis: bool, optional (default=True)
True to keep emojis.
urls: bool, optional (default=True)
True to keep urls.
tags: bool, optional (default=True)
True to keep tags: <tag>.
emails: bool, optional (default=True)
True to keep emails.
users: bool, optional (default=True)
True to keep users handles: @cbaziotis.
hashtags: bool, optional (default=True)
True to keep hashtags.
phones: bool, optional (default=True)
True to keep phones.
percents: bool, optional (default=True)
True to keep percents.
money: bool, optional (default=True)
True to keep money expressions.
date: bool, optional (default=True)
True to keep date expressions.
time: bool, optional (default=True)
True to keep time expressions.
acronyms: bool, optional (default=True)
True to keep acronyms.
emoticons: bool, optional (default=True)
True to keep emoticons.
censored: bool, optional (default=True)
True to keep censored words: f**k.
emphasis: bool, optional (default=True)
True to keep words with emphasis: *very* good.
numbers: bool, optional (default=True)
True to keep numbers.
temperature: bool, optional (default=True)
True to keep temperatures.
distance: bool, optional (default=True)
True to keep distances.
volume: bool, optional (default=True)
True to keep volumes.
duration: bool, optional (default=True)
True to keep durations.
weight: bool, optional (default=True)
True to keep weights.
hypen: bool, optional (default=True)
True to keep hyphens.
"""
[3]:
tokenizer = malaya.preprocessing.Tokenizer()
[4]:
tokenizer.tokenize(string1)
[4]:
['xjdi',
'ke',
',',
'y',
'u',
'xsuke',
'makan',
'HUSEIN',
'kt',
'situ',
'tmpt',
',',
'i',
'hate',
'it',
'.',
'pelikle',
',',
'pada']
[5]:
tokenizer.tokenize(string2)
[5]:
['i',
'mmg2',
'xske',
'mknn',
'HUSEIN',
'kampng',
'tmpat',
',',
'i',
'love',
'them',
'.',
'pelikle',
'saye']
[6]:
tokenizer.tokenize(string3)
[6]:
['perdana',
'menteri',
'ke11',
'sgt',
'suka',
'makn',
'ayam',
',',
'harganya',
'cuma',
'rm15.50']
[7]:
tokenizer.tokenize(string4)
[7]:
['pada',
'10',
'/',
'4',
',',
'kementerian',
'mengumumkan',
',',
'1',
'/',
'100']
[8]:
tokenizer.tokenize(string6)
[8]:
['Husein',
'Zolkepli',
'(',
'2011',
'-',
'2019',
')',
'adalah',
'ketua',
'kampng',
'di',
'kedah',
'sekolah',
'King',
'Edward',
'ke-IV']
[9]:
tokenizer.tokenize(string7)
[9]:
['2jam',
'30 minit',
'aku',
'tunggu',
'kau',
',',
'60.1 kg',
'kau',
'ni',
',',
'suhu',
'harini',
'31.2c',
',',
'aku',
'dahaga',
'minum',
'600ml']
url¶
[10]:
tokenizer.tokenize('website saya http://huseinhouse.com')
[10]:
['website', 'saya', 'http://huseinhouse.com']
tags¶
[12]:
tokenizer.tokenize('panggil saya <husein>')
[12]:
['panggil', 'saya', '<husein>']
[13]:
tokenizer.tokenize('panggil saya <husein >')
[13]:
['panggil', 'saya', '<', 'husein', '>']
emails¶
[14]:
tokenizer.tokenize('email saya husein@rumah.com')
[14]:
['email', 'saya', 'husein@rumah.com']
[15]:
tokenizer.tokenize('email saya husein@rumah.com.my')
[15]:
['email', 'saya', 'husein@rumah.com.my']
users¶
[16]:
tokenizer.tokenize('twitter saya @husein123zolkepli')
[16]:
['twitter', 'saya', '@husein123zolkepli']
[17]:
tokenizer.tokenize('twitter saya @ husein123zolkepli')
[17]:
['twitter', 'saya', '@', 'husein123zolkepli']
hashtags¶
[18]:
tokenizer.tokenize('panggil saya #huseincomel')
[18]:
['panggil', 'saya', '#huseincomel']
[19]:
tokenizer.tokenize('panggil saya # huseincomel')
[19]:
['panggil', 'saya', '#', 'huseincomel']
phones¶
[20]:
tokenizer.tokenize('call sye di 013-1234567')
[20]:
['call', 'sye', 'di', '013-1234567']
[27]:
tokenizer.tokenize('call sye di 013- 1234567')
[27]:
['call', 'sye', 'di', '013', '-', '1234567']
percents¶
[28]:
tokenizer.tokenize('saya sokong 100%')
[28]:
['saya', 'sokong', '100%']
[29]:
tokenizer.tokenize('saya sokong 100 %')
[29]:
['saya', 'sokong', '100', '%']
money¶
[30]:
tokenizer.tokenize('saya tinggal rm100')
[30]:
['saya', 'tinggal', 'rm100']
[31]:
tokenizer.tokenize('saya tinggal rm100k')
[31]:
['saya', 'tinggal', 'rm100k']
[32]:
tokenizer.tokenize('saya tinggal rm100M')
[32]:
['saya', 'tinggal', 'rm100M']
[33]:
tokenizer.tokenize('saya tinggal rm100.123M')
[33]:
['saya', 'tinggal', 'rm100.123M']
[34]:
tokenizer.tokenize('saya tinggal 40 sen')
[34]:
['saya', 'tinggal', '40 sen']
[35]:
tokenizer.tokenize('saya tinggal 21 ringgit 50 sen')
[35]:
['saya', 'tinggal', '21 ringgit', '50 sen']
date¶
[36]:
tokenizer.tokenize('tarikh perjumpaan 10/11/2011')
[36]:
['tarikh', 'perjumpaan', '10/11/2011']
[37]:
tokenizer.tokenize('tarikh perjumpaan 10-11-2011')
[37]:
['tarikh', 'perjumpaan', '10-11-2011']
[38]:
tokenizer.tokenize('tarikh perjumpaan 12 mei 2011')
[38]:
['tarikh', 'perjumpaan', '12 mei 2011']
[39]:
tokenizer.tokenize('tarikh perjumpaan mei 12 2011')
[39]:
['tarikh', 'perjumpaan', 'mei 12 2011']
time¶
[40]:
tokenizer.tokenize('jumpa 3 am')
[40]:
['jumpa', '3 am']
[41]:
tokenizer.tokenize('jumpa 22:00')
[41]:
['jumpa', '22:00']
censored¶
[42]:
tokenizer.tokenize('f**k lah')
[42]:
['f**k', 'lah']
emphasis¶
[43]:
tokenizer.tokenize('*damn* good weih')
[43]:
['*damn*', 'good', 'weih']
numbers¶
[44]:
tokenizer.tokenize('no saya 123')
[44]:
['no', 'saya', '123']
temperature¶
[45]:
tokenizer.tokenize('sejuk harini, 31.1c')
[45]:
['sejuk', 'harini', ',', '31.1c']
[46]:
tokenizer.tokenize('sejuk harini, 31.1C')
[46]:
['sejuk', 'harini', ',', '31.1C']
distance¶
[47]:
tokenizer.tokenize('nak sampai lagi 31km')
[47]:
['nak', 'sampai', 'lagi', '31km']
[48]:
tokenizer.tokenize('nak sampai lagi 31 km')
[48]:
['nak', 'sampai', 'lagi', '31 km']
volume¶
[49]:
tokenizer.tokenize('botol ni 400ml')
[49]:
['botol', 'ni', '400ml']
[50]:
tokenizer.tokenize('botol ni 400 l')
[50]:
['botol', 'ni', '400 l']
duration¶
[51]:
tokenizer.tokenize('aku dah tunggu kau 2jam kut')
[51]:
['aku', 'dah', 'tunggu', 'kau', '2jam', 'kut']
[52]:
tokenizer.tokenize('aku dah tunggu kau 2 jam kut')
[52]:
['aku', 'dah', 'tunggu', 'kau', '2 jam', 'kut']
[53]:
tokenizer.tokenize('lagi 10 minit 3 jam')
[53]:
['lagi', '10 minit', '3 jam']
weight¶
[54]:
tokenizer.tokenize('berat kau 60 kg')
[54]:
['berat', 'kau', '60 kg']
[55]:
tokenizer.tokenize('berat kau 60kg')
[55]:
['berat', 'kau', '60kg']
hypen¶
[56]:
tokenizer.tokenize('sememang-memangnya kau sakai')
[56]:
['sememang-memangnya', 'kau', 'sakai']
[57]:
tokenizer.tokenize('sememang- memangnya kau sakai')
[57]:
['sememang', '-', 'memangnya', 'kau', 'sakai']
Sentence tokenizer¶
We consider prefixes, suffixes, starters, acronyms, websites, emails, digits, text before digits, time and month when splitting a string into multiple sentences.
def split_into_sentences(text, minimum_length = 5):
"""
Sentence tokenizer.
Parameters
----------
text: str
minimum_length: int, optional (default=5)
minimum length to assume a string is a sentence, default 5 characters.
Returns
-------
result: List[str]
"""
[58]:
s = """
no.1 polis bertemu dengan suspek di ladang getah. polis tembak pui pui pui bertubi tubi
"""
[59]:
malaya.text.function.split_into_sentences(s)
[59]:
['no.1 polis bertemu dengan suspek di ladang getah.',
'polis tembak pui pui pui bertubi tubi.']
[60]:
s = """
email saya di husein.zol01@gmail.com, nanti jom berkopi
"""
[61]:
malaya.text.function.split_into_sentences(s)
[61]:
['email saya di husein.zol01@gmail.com, nanti jom berkopi.']
[62]:
s = """
ke. 2 cerita nya begini. saya berjalan jalan ditepi muara jumpa anak dara.
"""
[63]:
malaya.text.function.split_into_sentences(s)
[63]:
['ke.2 cerita nya begini.',
'saya berjalan jalan ditepi muara jumpa anak dara.']
[ ]: