"""
Clean text, make it readable and obtain metadata from it.
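
Example (an illustrative sketch; assumes the default English spaCy model,
'en_core_web_sm', is installed):

    from textpipe.doc import Doc
    doc = Doc('“Please clean this piece… of text</b>„')
    doc.clean     # '"Please clean this piece... of text"'
    doc.language  # 'en'
    doc.nwords    # number of tokens in the cleaned text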
"""

import functools
import re
import unicodedata
from collections import Counter
from urllib.parse import urlparse

import cld2
import numpy
import spacy
import spacy.matcher
import textacy
import textacy.ke
import textacy.text_utils
from bs4 import BeautifulSoup
from datasketch import MinHash
from gensim.models.keyedvectors import KeyedVectors
from gensim.summarization.summarizer import summarize

from textpipe.data.emoji import EMOJI_TO_UNICODE_NAME, EMOJI_TO_SENTIMENT
from textpipe.wrappers import RedisKeyedVectors
from textpipe.util import getattr_


class TextpipeMissingModelException(Exception):
    """Raised when the requested model is missing"""


class RedisIDFWeightingMismatchException(Exception):
    """Raised when the specified idf weighting scheme does not match the
    weighting scheme used by the RedisKeyedVectors instance"""


class Doc:
    """
    Create a doc instance of text, obtain cleaned, readable text and
    metadata from this doc.

    Properties:
    raw: incoming, unedited text
    language: 2-letter code for the language of the text
    is_detected_language: whether the language was detected or specified beforehand
    is_reliable_language: whether the language was specified or reliably detected
    hint_language: language you expect your text to be
    _spacy_nlps: nested dictionary {lang: {model_id: model}} with loaded spacy language modules
    """
    # pylint: disable=too-many-instance-attributes
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-public-methods

    def __init__(self,
                 raw,
                 language=None,
                 hint_language='en',
                 spacy_nlps=None,
                 gensim_vectors=None):
        self.raw = raw
        self._language = language
        self.hint_language = hint_language
        self._spacy_nlps = spacy_nlps if spacy_nlps is not None else dict()
        self._gensim_vectors = gensim_vectors if gensim_vectors is not None else dict()
        self.is_detected_language = language is None
        self._is_reliable_language = True if language else None
        self._text_stats = {}
        self.nr_train_tokens = 0

    @property
    def language(self):
        """
        Provided or detected language of a text

        >>> from textpipe.doc import Doc
        >>> Doc('Test sentence for testing text').language
        'en'
        >>> Doc('Test sentence for testing text', language='en').language
        'en'
        >>> Doc('Test', hint_language='nl').language
        'nl'
        """
        if not self._language:
            self._is_reliable_language, self._language = self.detect_language(self.hint_language)
        return self._language

    @property
    def is_reliable_language(self):
        """
        True if the language was specified or if it was reliably detected

        >>> from textpipe.doc import Doc
        >>> Doc('...').is_reliable_language
        False
        >>> Doc('Test', hint_language='nl').is_reliable_language
        True
        """
        if self._is_reliable_language is None:
            self._is_reliable_language, self._language = self.detect_language(self.hint_language)
        return self._is_reliable_language

    @functools.lru_cache()
    def detect_language(self, hint_language=None):
        """
        Detect the language of a text if no language was provided along with the text

        Args:
        hint_language: language you expect your text to be

        Returns:
        is_reliable: whether the top language is much better than the 2nd best language
        language: 2-letter code for the language of the text

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Test')
        >>> doc.detect_language()
        (True, 'en')
        >>> doc.detect_language('nl')
        (True, 'nl')
        >>> Doc('...').detect_language()
        (False, 'un')
        """
        none_utf_chars_removed = ''.join([l for l in self.clean
                                          if unicodedata.category(l)[0] not in {'M', 'C'}])
        is_reliable, _, best_guesses = cld2.detect(none_utf_chars_removed,
                                                   hintLanguage=hint_language,
                                                   bestEffort=True)
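        # Each best guess is a 4-tuple of (name, code, percent, score);
        # 'un' is cld2's code for an undetermined language.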
        if not best_guesses or len(best_guesses[0]) != 4 or best_guesses[0][1] == 'un':
            return False, 'un'
        return is_reliable, best_guesses[0][1]

    @property
    def _spacy_doc(self):
        """
        Loads the default spacy doc or creates one if necessary

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Test sentence for testing text')
        >>> type(doc._spacy_doc)
        <class 'spacy.tokens.doc.Doc'>
        """
        lang = self.language if self.is_reliable_language else self.hint_language
        return self._load_spacy_doc(lang)

    @functools.lru_cache()
    def _load_spacy_doc(self, lang, model_name=None):
        """
        Loads a spacy doc or creates one if necessary
        """
        # Load default spacy model if necessary, if not loaded already
        if lang not in self._spacy_nlps or (model_name is None and
                                            model_name not in self._spacy_nlps[lang]):
            if lang not in self._spacy_nlps:
                self._spacy_nlps[lang] = {}
            self._spacy_nlps[lang][None] = self._get_default_nlp(lang)
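        # Custom models are only available if they were passed in via the
        # spacy_nlps constructor argument; they are never loaded on the fly.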
        if model_name not in self._spacy_nlps[lang] and model_name is not None:
            raise TextpipeMissingModelException(f'Custom model {model_name} '
                                                f'is missing.')
        nlp = self._spacy_nlps[lang][model_name]
        doc = nlp(self.clean_text())
        return doc

    @staticmethod
    @functools.lru_cache()
    def _get_default_nlp(lang):
        """
        Loads the spacy default language module for the Doc's language
        """
        try:
            return spacy.load('{}_core_{}_sm'.format(lang, 'web' if lang == 'en' else 'news'))
        except IOError:
            # pylint: disable=raise-missing-from
            raise TextpipeMissingModelException(f'Default model for language "{lang}" '
                                                f'is not available.')

    @property
    def clean(self):
        """
        Cleaned text with sensible defaults.

        >>> from textpipe.doc import Doc
        >>> doc = Doc('“Please clean this piece… of text</b>„')
        >>> doc.clean
        '"Please clean this piece... of text"'
        """
        return self.clean_text()

    @functools.lru_cache()
    def clean_text(self, remove_html=True, clean_dots=True, clean_quotes=True,
                   clean_whitespace=True):
        """
        Clean HTML and normalise punctuation.

        >>> from textpipe.doc import Doc
        >>> doc = Doc('“Please clean this piece… of text</b>„')
        >>> doc.clean_text(False, False, False, False) == doc.raw
        True
        """
        text = self.raw
        if remove_html:
            text = BeautifulSoup(text, 'html.parser').get_text()  # remove HTML

        # Three regexes below adapted from Blendle cleaner.py
        # https://github.com/blendle/research-summarization/blob/master/enrichers/cleaner.py#L29
        if clean_dots:
            text = re.sub(r'…', '...', text)
        if clean_quotes:
            text = re.sub(r'[`‘’‛⸂⸃⸌⸍⸜⸝]', "'", text)
            text = re.sub(r'[„“]|(\'\')|(,,)', '"', text)
        if clean_whitespace:
            text = re.sub(r'\s+', ' ', text).strip()
        return text

    @property
    def ents(self):
        """
        A list of the named entities with sensible defaults.

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Sentence for testing Google text')
        >>> doc.ents
        [('Google', 'ORG')]
        """
        return self.find_ents()

    @functools.lru_cache()
    def find_ents(self, model_name=None, ent_attributes=('text', 'label_')):
        """
        Extract a list of the named entities in text, with the possibility of using a custom model.

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Sentence for testing Google text')
        >>> doc.find_ents()
        [('Google', 'ORG')]
        """
        lang = self.language if self.is_reliable_language else self.hint_language
        return list({tuple(getattr_(ent, attr) for attr in ent_attributes)
                     for ent in self._load_spacy_doc(lang, model_name).ents})

    def match(self, matcher):
        """
        Run a SpaCy matcher over the cleaned content

        >>> import spacy.matcher
        >>> from textpipe.doc import Doc
        >>> matcher = spacy.matcher.Matcher(spacy.lang.en.English().vocab)
        >>> matcher.add('HASHTAG', None, [{'ORTH': '#'}, {'IS_ASCII': True}])
        >>> Doc('Test with #hashtag').match(matcher)
        [('#hashtag', 'HASHTAG')]
        """
        return [(self._spacy_doc[start:end].text, matcher.vocab.strings[match_id])
                for match_id, start, end in matcher(self._spacy_doc)]

    @property
    def emojis(self):
        """
        Emojis detected using SpaCy matcher over the cleaned content, with unicode name and
        sentiment score.

        >>> from pprint import pprint
        >>> from textpipe.doc import Doc
        >>> pprint(Doc('Test with emoji 😀 😋 ').emojis)
        [('😀', 'GRINNING FACE', 0.571753986332574),
         ('😋', 'FACE SAVOURING DELICIOUS FOOD', 0.6335149863760218)]
        """
        detected_emojis = []
        matcher = spacy.matcher.Matcher(self._spacy_doc.vocab)
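        # Register one single-token pattern per known emoji, keyed by its
        # unicode name.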
        for emoji, unicode_name in EMOJI_TO_UNICODE_NAME.items():
            matcher.add(unicode_name, None, [{'ORTH': emoji}])
        for emoji, unicode_name in self.match(matcher):
            detected_emojis.append((emoji, unicode_name, EMOJI_TO_SENTIMENT[emoji]))
        return detected_emojis

    @property
    def nsents(self):
        """
        Extract the number of sentences from text

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Test sentence for testing text. And another sentence for testing!')
        >>> doc.nsents
        2
        """
        return len(list(self._spacy_doc.sents))

    @property
    def sents(self):
        """
        Extract the text and character offset (begin) of sentences from text

        >>> from pprint import pprint
        >>> from textpipe.doc import Doc
        >>> doc = Doc('Test sentence for testing text. '
        ...           'And another one with, some, punctuation! And stuff.')
        >>> pprint(doc.sents)
        [('Test sentence for testing text.', 0),
         ('And another one with, some, punctuation!', 32),
         ('And stuff.', 73)]
        """
        return [(span.text, span.start_char) for span in self._spacy_doc.sents]

    @property
    def nwords(self):
        """
        Extract the number of words from text

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Test sentence for testing text')
        >>> doc.nwords
        5
        """
        return len(self.words)

    @property
    def words(self):
        """
        Extract the text and character offset (begin) of words from text

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Test sentence for testing text.')
        >>> doc.words
        [('Test', 0), ('sentence', 5), ('for', 14), ('testing', 18), ('text', 26), ('.', 30)]
        """
        return [(token.text, token.idx) for token in self._spacy_doc]

    @property
    def word_counts(self):
        """
        Extract words with their counts

        >>> from pprint import pprint
        >>> from textpipe.doc import Doc
        >>> pprint(Doc('Test sentence for testing vectorisation of a sentence.').word_counts)
        {'.': 1,
         'Test': 1,
         'a': 1,
         'for': 1,
         'of': 1,
         'sentence': 2,
         'testing': 1,
         'vectorisation': 1}
        """
        return dict(Counter(word for word, _ in self.words))

    @property
    def complexity(self):
        """
        Determine the complexity of text using the Flesch
        reading ease test, ranging from 0.0 to 100.0 with 0.0
        being the most difficult to read.

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Test sentence for testing text')
        >>> doc.complexity
        83.32000000000004
        """
        if not self._text_stats:
            self._text_stats = textacy.TextStats(self._spacy_doc)
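        # The Flesch formula is undefined without syllables; treat such a
        # text as maximally readable.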
        if self._text_stats.n_syllables == 0:
            return 100
        return self._text_stats.flesch_reading_ease

    @property
    def sentiment(self):
        """
        Returns polarity score (-1 to 1) and a subjectivity score (0 to 1)

        Currently only English, Dutch, French and Italian are supported

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Dit is een leuke zin.')
        >>> doc.sentiment
        (0.6, 0.9666666666666667)
        """
        if self.language == 'en':
            from pattern.text.en import sentiment as sentiment_en  # pylint: disable=import-outside-toplevel
            return sentiment_en(self.clean)
        if self.language == 'nl':
            from pattern.text.nl import sentiment as sentiment_nl  # pylint: disable=import-outside-toplevel
            return sentiment_nl(self.clean)
        if self.language == 'fr':
            from pattern.text.fr import sentiment as sentiment_fr  # pylint: disable=import-outside-toplevel
            return sentiment_fr(self.clean)
        if self.language == 'it':
            from pattern.text.it import sentiment as sentiment_it  # pylint: disable=import-outside-toplevel
            return sentiment_it(self.clean)
        raise TextpipeMissingModelException(f'No sentiment model for {self.language}')

    @functools.lru_cache()
    def extract_keyterms(self, ranker='textrank', n_terms=10, **kwargs):
        """
        Extract and rank key terms in the document by proxying to
        `textacy.ke`. Returns a list of (term, score) tuples. Depending
        on the ranking algorithm used, terms can consist of multiple words.
        Available rankers are TextRank ('textrank'), SGRank ('sgrank'),
        sCAKE ('scake') and YAKE ('yake').

        >>> from pprint import pprint
        >>> from textpipe.doc import Doc
        >>> doc = Doc('Amsterdam is the awesome capital of the Netherlands.')
        >>> pprint(doc.extract_keyterms(n_terms=3))
        [('awesome capital', 0.2384595585785324),
         ('Netherlands', 0.1312376837867799),
         ('Amsterdam', 0.07452637881734389)]
        >>> pprint(doc.extract_keyterms(ranker='sgrank'))
        [('awesome capital', 0.5638711013322963),
         ('Netherlands', 0.22636566128805719),
         ('Amsterdam', 0.20976323737964653)]
        >>> pprint(doc.extract_keyterms(ranker='sgrank', ngrams=(1, )))
        [('Netherlands', 0.40349153868348353),
         ('capital', 0.29176091721189235),
         ('awesome', 0.18150539145949696),
         ('Amsterdam', 0.12324215264512725)]
        """
        if self.nwords < 1:
            return []
        rankers = ['textrank', 'sgrank', 'scake', 'yake']
        if ranker not in rankers:
            raise ValueError(f'ranker "{ranker}" not available; use one '
                             f'of {rankers}')
        ranking_fn = getattr(textacy.ke, ranker)
        return ranking_fn(self._spacy_doc, topn=n_terms, **kwargs)

    @property
    def keyterms(self):
        """
        Return textranked keyterms for the document.

        >>> from pprint import pprint
        >>> from textpipe.doc import Doc
        >>> doc = Doc('Amsterdam is the awesome capital of the Netherlands.')
        >>> pprint(doc.extract_keyterms(n_terms=3))
        [('awesome capital', 0.2384595585785324),
         ('Netherlands', 0.1312376837867799),
         ('Amsterdam', 0.07452637881734389)]
        """
        return self.extract_keyterms()

    @property
    def minhash(self):
        """
        A cheap way to compute a hash for finding similarity of docs

        Source: https://ekzhu.github.io/datasketch/minhash.html

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Sentence for computing the minhash')
        >>> doc.minhash[:5]
        [407326892, 814360600, 1099082245, 1176349439, 1735256]
        """
        return self.find_minhash()

    @functools.lru_cache()
    def find_minhash(self, num_perm=128):
        """
        Compute minhash, cached.
        """
        words = self.words
        doc_hash = MinHash(num_perm=num_perm)
        for word, _ in words:
            doc_hash.update(word.encode('utf8'))
        return list(doc_hash.digest())

    def similarity(self, other_doc, metric='jaccard', hash_method='minhash'):
        """
        Computes similarity for two documents.
        Only minhash Jaccard similarity is implemented.

        >>> from textpipe.doc import Doc
        >>> doc1 = Doc('Sentence for computing the minhash')
        >>> doc2 = Doc('Sentence for computing the similarity')
        >>> doc1.similarity(doc2)
        0.7265625
        """
        if hash_method == 'minhash' and metric == 'jaccard':
            hash1 = MinHash(hashvalues=self.minhash)
            hash2 = MinHash(hashvalues=other_doc.minhash)
            return hash1.jaccard(hash2)
        raise NotImplementedError(f'Metric/hash method combination {metric}'
                                  f'/{hash_method} is not implemented as similarity metric')

    @property
    def word_vectors(self):
        """
        Returns word embeddings for the words in the document.
        """
        return self.generate_word_vectors()

    @functools.lru_cache()
    def generate_word_vectors(self, model_name=None):
        """
        Returns word embeddings for the words in the document.
        The default spacy models don't have "true" word vectors
        but only context-sensitive tensors that are within the document.

        Returns:
        A dictionary mapping words from the document to a dict with the
        corresponding values of the following variables:
        has_vector: Does the token have a vector representation?
        vector_norm: The L2 norm of the token's vector (the square root of the
        sum of the values squared)
        is_oov: Out-of-vocabulary (this is always True since the default
        models include no word vectors)
        vector: The vector representation of the word

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Test sentence')
        >>> doc.word_vectors['Test']['is_oov']
        True
        >>> len(doc.word_vectors['Test']['vector'])
        96
        >>> doc.word_vectors['Test']['vector_norm'] == doc.word_vectors['sentence']['vector_norm']
        False
        """
        lang = self.language if self.is_reliable_language else self.hint_language
        return {token.text: {'has_vector': token.has_vector,
                             'vector_norm': token.vector_norm,
                             'is_oov': token.is_oov,
                             'vector': token.vector.tolist()}
                for token in self._load_spacy_doc(lang, model_name)}

    @property
    def doc_vector(self):
        """
        Returns document embeddings based on the words in the document.

        >>> import numpy
        >>> from textpipe.doc import Doc
        >>> numpy.array_equiv(Doc('a b').doc_vector, Doc('a b').doc_vector)
        True
        >>> numpy.array_equiv(Doc('a b').doc_vector, Doc('a a b').doc_vector)
        False
        """
        return self.aggregate_word_vectors()

    @functools.lru_cache()
    def aggregate_word_vectors(self,
                               model_name=None,
                               aggregation='mean',
                               normalize=False,
                               exclude_oov=False):
        """
        Returns document embeddings based on the words in the document.

        >>> import numpy
        >>> from textpipe.doc import Doc
        >>> doc1 = Doc('a b')
        >>> doc2 = Doc('a a b')
        >>> numpy.array_equiv(doc1.aggregate_word_vectors(), doc1.aggregate_word_vectors())
        True
        >>> numpy.array_equiv(doc1.aggregate_word_vectors(), doc2.aggregate_word_vectors())
        False
        >>> numpy.array_equiv(doc1.aggregate_word_vectors(aggregation='mean'),
        ...                   doc2.aggregate_word_vectors(aggregation='sum'))
        False
        >>> numpy.array_equiv(doc1.aggregate_word_vectors(aggregation='mean'),
        ...                   doc2.aggregate_word_vectors(aggregation='var'))
        False
        >>> numpy.array_equiv(doc1.aggregate_word_vectors(aggregation='sum'),
        ...                   doc2.aggregate_word_vectors(aggregation='var'))
        False
        >>> doc = Doc('sentence with an out of vector word lsseofn')
        >>> len(doc.aggregate_word_vectors())
        96
        >>> numpy.array_equiv(doc.aggregate_word_vectors(exclude_oov=False),
        ...                   doc.aggregate_word_vectors(exclude_oov=True))
        False
        """
        lang = self.language if self.is_reliable_language else self.hint_language
        tokens = [token for token in self._load_spacy_doc(lang, model_name)
                  if not exclude_oov or not token.is_oov]
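        # Optionally L2-normalize each token vector before reducing them to a
        # single document vector.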
        vectors = [token.vector / token.vector_norm if normalize else token.vector
                   for token in tokens]
        if aggregation == 'mean':
            return numpy.mean(vectors, axis=0).tolist()
        if aggregation == 'sum':
            return numpy.sum(vectors, axis=0).tolist()
        if aggregation == 'var':
            return numpy.var(vectors, axis=0).tolist()
        raise NotImplementedError(f'Aggregation method {aggregation} is not implemented.')

    def _load_gensim_word2vec_model(self,
                                    model_uri=None,
                                    max_lru_cache_size=1024):
        """
        Loads a pre-trained Gensim word2vec keyed vectors model from either a local file or Redis

        >>> from textpipe.doc import Doc
        >>> model = Doc('')._load_gensim_word2vec_model('tests/models/gensim_test_nl.kv')
        >>> type(model)
        <class 'gensim.models.keyedvectors.Word2VecKeyedVectors'>
        """
        lang = self.language if self.is_reliable_language else self.hint_language
        if not self._gensim_vectors or lang not in self._gensim_vectors:
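            # A 'redis://' URI loads vectors lazily from Redis; any other URI
            # is treated as a local keyed-vectors file.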
            if urlparse(model_uri).scheme == 'redis':
                vectors = RedisKeyedVectors(model_uri,
                                            lang,
                                            max_lru_cache_size)
                if not vectors.exists:
                    raise TextpipeMissingModelException(f'Redis does not contain a model '
                                                        f'for language {lang}. The model '
                                                        f'needs to be loaded before use '
                                                        f'(see load_keyed_vectors_into_redis).')
            elif model_uri:
                try:
                    vectors = KeyedVectors.load(model_uri, mmap='r')
                    self.nr_train_tokens = sum(token_vocab.count for token_vocab in
                                               vectors.vocab.values())
                except FileNotFoundError:
                    # pylint: disable=raise-missing-from
                    raise TextpipeMissingModelException(
                        f'Gensim keyed vector file {model_uri} is not available.')
            else:
                raise TextpipeMissingModelException(
                    'Either specify model filename or redis URI')
            self._gensim_vectors[lang] = vectors
        return self._gensim_vectors[lang]

    @functools.lru_cache()
    def generate_gensim_document_embedding(self,
                                           model_uri=None,
                                           lowercase=True,
                                           max_lru_cache_size=1024,
                                           idf_weighting='naive'):
        """
        Returns document embeddings generated with a Gensim word2vec model.
        The idf_weighting scheme can be 'naive' or 'log'.

        >>> import numpy
        >>> from textpipe.doc import Doc
        >>> doc1 = Doc('textmining is verwant aan tekstanalyse')
        >>> doc2 = Doc('textmining is verwant aan textmining')
        >>> doc3 = Doc('tekstanalyse is verwant aan textmining')
        >>> test_model_file = 'tests/models/gensim_test_nl.kv'
        >>> numpy.allclose(doc1.generate_gensim_document_embedding(model_uri=test_model_file), \
                           doc2.generate_gensim_document_embedding(model_uri=test_model_file))
        False
        >>> numpy.allclose(doc1.generate_gensim_document_embedding(model_uri=test_model_file), \
                           doc3.generate_gensim_document_embedding(model_uri=test_model_file))
        True
        """
        if not model_uri:
            raise TextpipeMissingModelException('No Gensim keyed vector location specified.')
        model = self._load_gensim_word2vec_model(model_uri,
                                                 max_lru_cache_size)
        if lowercase:
            prepared_word_counts = [(word.lower(), count)
                                    for word, count in self.word_counts.items()
                                    if word.lower() in model]
        else:
            prepared_word_counts = [(word, count) for word, count in self.word_counts.items()
                                    if word in model]
        if not prepared_word_counts:
            return []

        if isinstance(model, RedisKeyedVectors):
            # For redis, the word vectors are already divided by the idf when a word2vec model
            # was loaded (see RedisKeyedVectors.load_keyed_vectors_into_redis)
            if model.idf_weighting != idf_weighting:
                raise RedisIDFWeightingMismatchException(f'The specified document embedding idf '
                                                         f'weighting "{idf_weighting}" does not '
                                                         f'match weighting in RedisKeyedVector "'
                                                         f'{model.idf_weighting}"')
            vectors = [model[word] * count
                       for word, count in prepared_word_counts]
        else:
            vectors = []
            for word, count in prepared_word_counts:
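                # 'naive' divides each word's count by its raw frequency in
                # the training corpus; 'log' divides by a smoothed log ratio
                # of total training tokens to the word's frequency.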
                if idf_weighting == 'naive':
                    idf = model.vocab[word].count
                elif idf_weighting == 'log':
                    idf = (numpy.log(self.nr_train_tokens / (model.vocab[word].count + 1)) + 1)
                else:
                    raise ValueError(f'idf_weighting "{idf_weighting}" not available; use '
                                     f'"naive" or "log"')
                vectors.append(model[word] * (count / idf))
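        # Element-wise sum over the weighted word vectors yields the final
        # document embedding.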
        return list(sum(vectors))

    @functools.lru_cache()
    def generate_textrank_summary(self, ratio=0.2, word_count=None):
        """
        Returns a textrank (extractive) summary of the document, generated with gensim.
        Returns an empty summary if the text could not be compressed.
        If both ratio and word_count are provided, ratio is ignored.
        """
        try:
            return summarize(self._spacy_doc.text, ratio=ratio, word_count=word_count, split=True)
        except ValueError:
            return []

    @property
    def summary(self):
        """
        Returns a textrank summary of the document (extractive summary)

        >>> from pprint import pprint
        >>> from textpipe.doc import Doc
        >>> text = '''Rice Pudding - Poem by Alan Alexander Milne
        ... What is the matter with Mary Jane?
        ... She's crying with all her might and main,
        ... And she won't eat her dinner - rice pudding again -
        ... What is the matter with Mary Jane?
        ... What is the matter with Mary Jane?
        ... I've promised her dolls and a daisy-chain,
        ... And a book about animals - all in vain -
        ... What is the matter with Mary Jane?
        ... What is the matter with Mary Jane?
        ... She's perfectly well, and she hasn't a pain;
        ... But, look at her, now she's beginning again! -
        ... What is the matter with Mary Jane?
        ... What is the matter with Mary Jane?
        ... I've promised her sweets and a ride in the train,
        ... And I've begged her to stop for a bit and explain -
        ... What is the matter with Mary Jane?
        ... What is the matter with Mary Jane?
        ... She's perfectly well and she hasn't a pain,
        ... And it's lovely rice pudding for dinner again!
        ... What is the matter with Mary Jane?'''
        >>> document = Doc(text)
        >>> pprint(document.summary)
        ["She's crying with all her might and main, And she won't eat her dinner - "
         'rice pudding again - What is the matter with Mary Jane?',
         "She's perfectly well and she hasn't a pain, And it's lovely rice pudding for "
         'dinner again!']
        >>> document = Doc('just 1 sentence.')
        >>> document.summary
        []
        """
        return self.generate_textrank_summary()

    def extract_lead(self, nsents=3):
        """
        Returns the first `nsents` sentences (text only) of the document.
        If the text has fewer sentences than requested, all of them are returned.

        >>> from pprint import pprint
        >>> from textpipe.doc import Doc
        >>> text = '''Rice Pudding - Poem by Alan Alexander Milne.
        ... What is the matter with Mary Jane?
        ... She's crying with all her might and main,
        ... And she won't eat her dinner - rice pudding again.
        ... What is the matter with Mary Jane? '''
        >>> document = Doc(text)
        >>> pprint(document.extract_lead())
        ['Rice Pudding - Poem by Alan Alexander Milne.',
         'What is the matter with Mary Jane?',
         "She's crying with all her might and main, And she won't eat her dinner - "
         'rice pudding again.']
        """
        return [s[0] for s in self.sents[:nsents]]

    @property
    def cats(self):
        """
        A dict of categories and their probability in the text.

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Sentence for testing text categorization')
        >>> doc.cats
        {}
        """
        return self.get_cats()

    @functools.lru_cache()
    def get_cats(self, model_name=None):
        """
        Extract a dict of categories and their probability in the text, with the possibility
        of using a custom model.

        >>> from textpipe.doc import Doc
        >>> doc = Doc('Sentence for testing text categorization')
        >>> doc.get_cats()
        {}
        """
        lang = self.language if self.is_reliable_language else self.hint_language
        return self._load_spacy_doc(lang, model_name).cats