Extractive#

This tutorial is available as an IPython notebook at Malaya/example/keyword-extractive.

[1]:
import os

os.environ['CUDA_VISIBLE_DEVICES'] = ''
[2]:
import malaya
[3]:
# https://www.bharian.com.my/berita/nasional/2020/06/698386/isu-bersatu-tun-m-6-yang-lain-saman-muhyiddin

string = """
Dalam saman itu, plaintif memohon perisytiharan, antaranya mereka adalah ahli BERSATU yang sah, masih lagi memegang jawatan dalam parti (bagi pemegang jawatan) dan layak untuk bertanding pada pemilihan parti.

Mereka memohon perisytiharan bahawa semua surat pemberhentian yang ditandatangani Muhammad Suhaimi bertarikh 28 Mei lalu dan pengesahan melalui mesyuarat Majlis Pimpinan Tertinggi (MPT) parti bertarikh 4 Jun lalu adalah tidak sah dan terbatal.

Plaintif juga memohon perisytiharan bahawa keahlian Muhyiddin, Hamzah dan Muhammad Suhaimi di dalam BERSATU adalah terlucut, berkuat kuasa pada 28 Februari 2020 dan/atau 29 Februari 2020, menurut Fasal 10.2.3 perlembagaan parti.

Yang turut dipohon, perisytiharan bahawa Seksyen 18C Akta Pertubuhan 1966 adalah tidak terpakai untuk menghalang pelupusan pertikaian berkenaan oleh mahkamah.

Perisytiharan lain ialah Fasal 10.2.6 Perlembagaan BERSATU tidak terpakai di atas hal melucutkan/ memberhentikan keahlian semua plaintif.
"""
[4]:
import re

# minimal cleaning: remove newlines, keep only letters, hyphens, parentheses
# and spaces (note this also drops digits, e.g. '28 Mei' becomes 'Mei'),
# then collapse repeated spaces.
def cleaning(string):
    string = string.replace('\n', ' ')
    string = re.sub(r'[^A-Za-z\-() ]+', ' ', string).strip()
    string = re.sub(r'[ ]+', ' ', string).strip()
    return string

string = cleaning(string)

Use RAKE algorithm#

Original implementation from https://github.com/aneesha/RAKE. Malaya added an attention mechanism on top of the RAKE algorithm; a sketch of the underlying scoring follows the function signature below.

def rake(
    string: str,
    model = None,
    vectorizer = None,
    top_k: int = 5,
    atleast: int = 1,
    stopwords = get_stopwords,
    **kwargs
):
    """
    Extract keywords using the RAKE algorithm.

    Parameters
    ----------
    string: str
    model: Object, optional (default=None)
        Transformer model or any model that has an `attention` method.
    vectorizer: Object, optional (default=None)
        Prefer `sklearn.feature_extraction.text.CountVectorizer` or,
        `malaya.text.vectorizer.SkipGramCountVectorizer`.
        If None, n-grams will be generated automatically based on `stopwords`.
    top_k: int, optional (default=5)
        return top-k results.
    atleast: int, optional (default=1)
        minimum count of appearances in the string to accept as a candidate.
    stopwords: List[str], (default=malaya.text.function.get_stopwords)
        A callable that returns a List[str], or a List[str], or a Tuple[str],
        for the automatic n-gram generator.

    Returns
    -------
    result: List[Tuple[float, str]]
    """

auto-ngram#

This will automatically generate variable-size n-grams as keyword candidates.

[5]:
malaya.keyword.extractive.rake(string)
[5]:
[(0.11666666666666665, 'ditandatangani Muhammad Suhaimi bertarikh Mei'),
 (0.08888888888888888, 'mesyuarat Majlis Pimpinan Tertinggi'),
 (0.08888888888888888, 'Seksyen C Akta Pertubuhan'),
 (0.05138888888888888, 'parti bertarikh Jun'),
 (0.04999999999999999, 'keahlian Muhyiddin Hamzah')]

auto-ngram with Attention#

[6]:
electra = malaya.transformer.huggingface(model = 'mesolitica/electra-base-generator-bahasa-cased')
[7]:
malaya.keyword.extractive.rake(string, model = electra)
[7]:
[(0.17997008100624823, 'mesyuarat Majlis Pimpinan Tertinggi'),
 (0.1483454093851034, 'Seksyen C Akta Pertubuhan'),
 (0.12264519442465575, 'ditandatangani Muhammad Suhaimi bertarikh Mei'),
 (0.0648944024912417, 'terlucut berkuat kuasa'),
 (0.057367307347866874, 'menghalang pelupusan pertikaian')]

using vectorizer#

[8]:
from malaya.text.vectorizer import SkipGramCountVectorizer

stopwords = malaya.text.function.get_stopwords()
vectorizer = SkipGramCountVectorizer(
    token_pattern = r'[\S]+',
    ngram_range = (1, 3),
    stop_words = stopwords,
    lowercase = False,
    skip = 2
)
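
To see what `skip = 2` adds, here is a quick sketch, assuming `SkipGramCountVectorizer` follows the usual sklearn `CountVectorizer` API (`fit`, `vocabulary_`): each n-gram may jump over up to two intervening tokens, so non-contiguous word pairs also become candidates.

# hypothetical illustration, restricted to bigrams for readability
demo = SkipGramCountVectorizer(
    token_pattern = r'[\S]+',
    ngram_range = (2, 2),
    lowercase = False,
    skip = 2,
)
demo.fit(['saman itu plaintif memohon perisytiharan'])
# contiguous bigrams plus bigrams skipping one or two tokens
print(sorted(demo.vocabulary_))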
[9]:
malaya.keyword.extractive.rake(string, vectorizer = vectorizer)
[9]:
[(0.0017052987393271276, 'parti memohon perisytiharan'),
 (0.0017036368782590756, 'memohon perisytiharan BERSATU'),
 (0.0017012023597074357, 'memohon perisytiharan sah'),
 (0.0017012023597074357, 'sah memohon perisytiharan'),
 (0.0016992809994779549, 'perisytiharan BERSATU sah')]

fixed-ngram with Attention#

[10]:
malaya.keyword.extractive.rake(string, model = electra, vectorizer = vectorizer)
[10]:
[(0.003363637295440784, 'Majlis Pimpinan Tertinggi'),
 (0.0033245617879032238, 'Majlis Pimpinan (MPT)'),
 (0.003241558153239806, 'mesyuarat Majlis Pimpinan'),
 (0.003145061732603308, 'pengesahan Majlis Pimpinan'),
 (0.0031039185739102334, 'Mei Majlis Pimpinan')]

Use Textrank algorithm#

Malaya simply uses the TextRank algorithm.

def textrank(
    string: str,
    model = None,
    vectorizer = None,
    top_k: int = 5,
    atleast: int = 1,
    stopwords = get_stopwords,
    **kwargs
):
    """
    Extract keywords using the TextRank algorithm.

    Parameters
    ----------
    string: str
    model: Object, optional (default=None)
        A model that has a `fit_transform` or `vectorize` method.
    vectorizer: Object, optional (default=None)
        Prefer `sklearn.feature_extraction.text.CountVectorizer` or,
        `malaya.text.vectorizer.SkipGramCountVectorizer`.
        If None, n-grams will be generated automatically based on `stopwords`.
    top_k: int, optional (default=5)
        return top-k results.
    atleast: int, optional (default=1)
        minimum count of appearances in the string to accept as a candidate.
    stopwords: List[str], (default=malaya.text.function.get_stopwords)
        A callable that returns a List[str], or a List[str], or a Tuple[str]

    Returns
    -------
    result: List[Tuple[float, str]]
    """
[11]:
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()

auto-ngram with TFIDF#

This will automatically generate variable-size n-grams as keyword candidates.

[12]:
malaya.keyword.extractive.textrank(string, model = tfidf)
2022-11-13 14:12:36.563458: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2022-11-13 14:12:36.588292: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected
2022-11-13 14:12:36.588326: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: husein-MS-7D31
2022-11-13 14:12:36.588331: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: husein-MS-7D31
2022-11-13 14:12:36.588424: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: Not found: was unable to find libcuda.so DSO loaded into this program
2022-11-13 14:12:36.588453: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 470.141.3
[12]:
[(0.00015733542072521276, 'plaintif memohon perisytiharan'),
 (0.00012558967703709949, 'Fasal perlembagaan parti'),
 (0.00011514137183023086, 'Fasal Perlembagaan BERSATU'),
 (0.00011505528232050443, 'parti'),
 (0.00010763519022276223, 'memohon perisytiharan')]

auto-ngram with Attention#

This will automatically generate variable-size n-grams as keyword candidates.

[13]:
malaya.transformer.available_huggingface()
[13]:
                                                               Size (MB)
mesolitica/roberta-base-bahasa-cased                               443.0
mesolitica/roberta-tiny-bahasa-cased                                66.1
mesolitica/bert-base-standard-bahasa-cased                         443.0
mesolitica/bert-tiny-standard-bahasa-cased                          66.1
mesolitica/roberta-base-standard-bahasa-cased                      443.0
mesolitica/roberta-tiny-standard-bahasa-cased                       66.1
mesolitica/electra-base-generator-bahasa-cased                     140.0
mesolitica/electra-small-generator-bahasa-cased                     19.3
mesolitica/finetune-mnli-t5-super-tiny-standard-bahasa-cased        50.7
mesolitica/finetune-mnli-t5-tiny-standard-bahasa-cased             139.0
mesolitica/finetune-mnli-t5-small-standard-bahasa-cased            242.0
mesolitica/finetune-mnli-t5-base-standard-bahasa-cased             892.0
[14]:
electra = malaya.transformer.huggingface(model = 'mesolitica/electra-small-generator-bahasa-cased')
roberta = malaya.transformer.huggingface(model = 'mesolitica/roberta-tiny-bahasa-cased')
[15]:
malaya.keyword.extractive.textrank(string, model = electra)
[15]:
[(6.3182659440111e-05, 'dipohon perisytiharan'),
 (6.316746690755411e-05, 'pemegang jawatan'),
 (6.316118879734439e-05, 'parti bertarikh Jun'),
 (6.31610442906559e-05, 'Februari'),
 (6.315818922956695e-05, 'plaintif')]
[16]:
malaya.keyword.extractive.textrank(string, model = roberta)
[16]:
[(6.59244746396528e-05, 'parti'),
 (6.584499867695376e-05, 'keahlian Muhyiddin Hamzah'),
 (6.488547404816735e-05, 'dipohon perisytiharan'),
 (6.45851817185769e-05, 'surat pemberhentian'),
 (6.436101053983185e-05, 'parti bertarikh Jun')]

Alternatively, you can use any classification model to find keywords sensitive to a specific domain.

[17]:
sentiment = malaya.sentiment.transformer(model = 'xlnet', quantized = True)
Load quantized model will cause accuracy drop.
[18]:
malaya.keyword.extractive.textrank(string, model = sentiment)
2022-11-13 14:13:04.195921: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 98304000 exceeds 10% of free system memory.
2022-11-13 14:13:04.454041: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 98304000 exceeds 10% of free system memory.
2022-11-13 14:13:04.619901: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 98304000 exceeds 10% of free system memory.
2022-11-13 14:13:04.771232: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 98304000 exceeds 10% of free system memory.
2022-11-13 14:13:04.944275: W tensorflow/core/framework/cpu_allocator_impl.cc:80] Allocation of 98304000 exceeds 10% of free system memory.
[18]:
[(6.847900589825627e-05, 'mahkamah Perisytiharan'),
 (6.846671026597114e-05, 'plaintif memohon perisytiharan'),
 (6.76475758513156e-05, 'Seksyen C Akta Pertubuhan'),
 (6.713097984411573e-05, 'terpakai'),
 (6.675669252867354e-05, 'memegang jawatan')]

fixed-ngram with Attention#

[19]:
stopwords = malaya.text.function.get_stopwords()
vectorizer = SkipGramCountVectorizer(
    token_pattern = r'[\S]+',
    ngram_range = (1, 3),
    stop_words = stopwords,
    lowercase = False,
    skip = 2
)
[20]:
malaya.keyword.extractive.textrank(string, model = electra, vectorizer = vectorizer)
[20]:
[(5.652169583755948e-09, 'plaintif perisytiharan'),
 (5.652076035266543e-09, 'perisytiharan ahli sah'),
 (5.651996008669155e-09, 'Plaintif perisytiharan keahlian'),
 (5.651931474886066e-09, 'Perisytiharan'),
 (5.6517038324285826e-09, 'plaintif memohon perisytiharan')]
[21]:
malaya.keyword.extractive.textrank(string, model = roberta, vectorizer = vectorizer)
[21]:
[(5.923585394811239e-09, 'keahlian Muhyiddin Muhammad'),
 (5.916529250417852e-09, 'parti bertarikh'),
 (5.913084767462724e-09, 'kuasa Fasal'),
 (5.902600575309735e-09, 'C Akta menghalang'),
 (5.900931358064983e-09, 'keahlian Muhyiddin')]

Use Attention mechanism#

Use the attention mechanism from a transformer model to extract important keywords.

def attention(
    string: str,
    model,
    vectorizer = None,
    top_k: int = 5,
    atleast: int = 1,
    stopwords = get_stopwords,
    **kwargs
):
    """
    Extract keywords using the attention mechanism.

    Parameters
    ----------
    string: str
    model: Object
        Transformer model or any model that has an `attention` method.
    vectorizer: Object, optional (default=None)
        Prefer `sklearn.feature_extraction.text.CountVectorizer` or,
        `malaya.text.vectorizer.SkipGramCountVectorizer`.
        If None, n-grams will be generated automatically based on `stopwords`.
    top_k: int, optional (default=5)
        return top-k results.
    atleast: int, optional (default=1)
        minimum count of appearances in the string to accept as a candidate.
    stopwords: List[str], (default=malaya.text.function.get_stopwords)
        A callable that returns a List[str], or a List[str], or a Tuple[str]

    Returns
    -------
    result: List[Tuple[float, str]]
    """

auto-ngram#

This will automatically generate variable-size n-grams as keyword candidates.

[22]:
malaya.keyword.extractive.attention(string, model = electra)
[22]:
[(0.7273892307219683, 'menghalang pelupusan pertikaian'),
 (0.03776807419211517, 'plaintif memohon perisytiharan'),
 (0.03168170723172234, 'dipohon perisytiharan'),
 (0.031016994512984893, 'memohon perisytiharan'),
 (0.021767187593695503, 'ditandatangani Muhammad Suhaimi bertarikh Mei')]
[23]:
malaya.keyword.extractive.attention(string, model = roberta)
[23]:
[(0.07387573605296668, 'plaintif memohon perisytiharan'),
 (0.06143065962303025, 'Fasal perlembagaan parti'),
 (0.05755474708967133, 'ditandatangani Muhammad Suhaimi bertarikh Mei'),
 (0.056663920761205355, 'Fasal Perlembagaan BERSATU'),
 (0.05564947633228625, 'memohon perisytiharan')]

fixed-ngram#

[24]:
malaya.keyword.extractive.attention(string, model = electra, vectorizer = vectorizer)
[24]:
[(0.029306966715927522, 'pertikaian Perisytiharan Fasal'),
 (0.029205816951205835, 'pertikaian mahkamah Fasal'),
 (0.02919627357369081, 'pertikaian Fasal Perlembagaan'),
 (0.029187334652333977, 'pelupusan pertikaian Fasal'),
 (0.02917217720500391, 'pertikaian Fasal')]
[25]:
malaya.keyword.extractive.attention(string, model = roberta, vectorizer = vectorizer)
[25]:
[(0.0034241675733855262, 'parti memohon perisytiharan'),
 (0.0032962150978338763, 'memohon perisytiharan BERSATU'),
 (0.003188648497869482, 'plaintif perisytiharan BERSATU'),
 (0.00318152613103623, 'BERSATU sah parti'),
 (0.0031634493792625897, 'perisytiharan BERSATU sah')]

Use similarity mechanism#

def similarity(
    string: str,
    model,
    vectorizer = None,
    top_k: int = 5,
    atleast: int = 1,
    stopwords = get_stopwords,
    **kwargs,
):
    """
    Extract keywords using sentence-embedding vs keyword-embedding similarity.

    Parameters
    ----------
    string: str
    model: Object
        Transformer model or any model that has a `vectorize` method.
    vectorizer: Object, optional (default=None)
        Prefer `sklearn.feature_extraction.text.CountVectorizer` or,
        `malaya.text.vectorizer.SkipGramCountVectorizer`.
        If None, n-grams will be generated automatically based on `stopwords`.
    top_k: int, optional (default=5)
        return top-k results.
    atleast: int, optional (default=1)
        minimum count of appearances in the string to accept as a candidate.
    stopwords: List[str], (default=malaya.text.function.get_stopwords)
        A callable that returns a List[str], or a List[str], or a Tuple[str]

    Returns
    -------
    result: List[Tuple[float, str]]
    """
[26]:
malaya.keyword.extractive.similarity(string, model = roberta)
[26]:
[(0.87396985, 'plaintif memohon perisytiharan'),
 (0.87190473, 'keahlian Muhyiddin Hamzah'),
 (0.8637232, 'mahkamah Perisytiharan'),
 (0.86043787, 'dipohon perisytiharan'),
 (0.8529165, 'memohon perisytiharan')]
[27]:
malaya.keyword.extractive.similarity(string, model = electra)
[27]:
[(0.99829656, 'keahlian Muhyiddin Hamzah'),
 (0.9982589, 'mesyuarat Majlis Pimpinan Tertinggi'),
 (0.9981606, 'Fasal perlembagaan parti'),
 (0.9981443, 'Fasal Perlembagaan BERSATU'),
 (0.9979403, 'ditandatangani Muhammad Suhaimi bertarikh Mei')]
[28]:
t5 = malaya.transformer.huggingface(model = 'mesolitica/finetune-mnli-t5-small-standard-bahasa-cased')
[29]:
malaya.keyword.extractive.similarity(string, model = t5)
[29]:
[(0.858336, 'mesyuarat Majlis Pimpinan Tertinggi'),
 (0.8571082, 'mahkamah Perisytiharan'),
 (0.7085603, 'Muhammad Suhaimi'),
 (0.68325794, 'terlucut berkuat kuasa'),
 (0.6421398, 'MPT')]