# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the processor resource loader.
"""
import hashlib
import json
import logging
import os
import re
import time
from collections import Counter
from copy import deepcopy
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from . import markup, path
from .constants import DEFAULT_TRAIN_SET_REGEX
from .core import Entity
from .exceptions import MindMeldError
from .gazetteer import Gazetteer
from .models.helpers import (CHAR_NGRAM_FREQ_RSC, ENABLE_STEMMING, GAZETTEER_RSC, QUERY_FREQ_RSC,
SENTIMENT_ANALYZER, SYS_TYPES_RSC, WORD_FREQ_RSC, WORD_NGRAM_FREQ_RSC,
OUT_OF_BOUNDS_TOKEN, mask_numerics)
from .path import MODEL_CACHE_PATH
from .query_cache import QueryCache
from .query_factory import QueryFactory
logger = logging.getLogger(__name__)
class ProcessedQueryList:
"""
ProcessedQueryList provides a memory-efficient, disk-backed list representation
for a list of queries.
"""
def __init__(self, cache=None, elements=None):
self._cache = cache
self.elements = elements or []
@property
def cache(self):
return self._cache
@cache.setter
def cache(self, cache):
self._cache = cache
def append(self, query_id):
self.elements.append(query_id)
def extend(self, query_ids):
self.elements.extend(query_ids)
def __len__(self):
return len(self.elements)
def __bool__(self):
return bool(len(self))
def processed_queries(self):
return ProcessedQueryList.Iterator(self)
def raw_queries(self):
return ProcessedQueryList.RawQueryIterator(self)
def queries(self):
return ProcessedQueryList.QueryIterator(self)
def entities(self):
return ProcessedQueryList.EntitiesIterator(self)
def domains(self):
return ProcessedQueryList.DomainIterator(self)
def intents(self):
return ProcessedQueryList.IntentIterator(self)
@staticmethod
def from_in_memory_list(queries):
"""
Creates a ProcessedQueryList wrapper around an
in-memory list of ProcessedQuery objects
Args:
queries (list(ProcessedQuery)): queries to wrap
Returns:
ProcessedQueryList object
"""
return ProcessedQueryList(
cache=ProcessedQueryList.MemoryCache(queries),
elements=list(range(len(queries)))
)
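# Illustrative usage (a sketch, not part of this module's API): `processed`
# is assumed to be a plain in-memory list of ProcessedQuery objects.
#
#     pq_list = ProcessedQueryList.from_in_memory_list(processed)
#     for query in pq_list.queries():
#         print(query.text)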
class Iterator:
def __init__(self, source, cached=False):
self.source = source
self.elements = source.elements
self.result_cache = [] if not cached else [None] * len(source)
self.iter_idx = -1
def __iter__(self):
self.iter_idx = -1
return self
def __next__(self):
self.iter_idx += 1
if self.iter_idx == len(self):
raise StopIteration
if self.result_cache and self.result_cache[self.iter_idx] is not None:
return self.result_cache[self.iter_idx]
result = self[self.iter_idx]
if self.result_cache:
self.result_cache[self.iter_idx] = result
return result
def __len__(self):
return len(self.elements)
def __getitem__(self, key):
"""
Override this function for getting other types of cached data.
"""
return self.source.cache.get(self.elements[key])
def reorder(self, indices):
self.elements = [self.elements[i] for i in indices]
if self.result_cache:
self.result_cache = [self.result_cache[i] for i in indices]
class RawQueryIterator(Iterator):
def __getitem__(self, key):
return self.source.cache.get_raw_query(self.elements[key])
class QueryIterator(Iterator):
def __getitem__(self, key):
return self.source.cache.get_query(self.elements[key])
class EntitiesIterator(Iterator):
def __getitem__(self, key):
return self.source.cache.get_entities(self.elements[key])
class DomainIterator(Iterator):
def __init__(self, source):
# Caches the elements in memory if they are backed by sqlite3
cached = not isinstance(source.cache, ProcessedQueryList.MemoryCache)
super().__init__(source, cached=cached)
def __getitem__(self, key):
return self.source.cache.get_domain(self.elements[key])
class IntentIterator(Iterator):
def __init__(self, source):
# Caches the elements in memory if they are backed by sqlite3
cached = not isinstance(source.cache, ProcessedQueryList.MemoryCache)
super().__init__(source, cached=cached)
def __getitem__(self, key):
return self.source.cache.get_intent(self.elements[key])
class ListIterator(Iterator):
"""
ListIterator is a wrapper around an in-memory list that supports the same
functionality as a ProcessedQueryList.Iterator. This allows arbitrary
lists of data to be built and presented as a
ProcessedQueryList.Iterator to functions that require one.
"""
def __init__(self, elements):
self.source = None
self.elements = elements
self.result_cache = None
self.iter_idx = -1
def __getitem__(self, key):
return self.elements[key]
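# Illustrative usage (a sketch): ListIterator lets a plain list stand in
# wherever a ProcessedQueryList.Iterator is expected.
#
#     labels = ProcessedQueryList.ListIterator(["greet", "exit", "greet"])
#     for label in labels:
#         ...  # iterates like any other ProcessedQueryList iterator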
class MemoryCache:
"""
A class to provide cache functionality for in-memory
lists of ProcessedQuery objects
"""
def __init__(self, queries):
self.queries = queries
def get(self, row_id):
return self.queries[row_id]
def get_raw_query(self, row_id):
return self.get(row_id).query.text
def get_query(self, row_id):
return self.get(row_id).query
def get_entities(self, row_id):
return self.get(row_id).entities
def get_domain(self, row_id):
return self.get(row_id).domain
def get_intent(self, row_id):
return self.get(row_id).intent
class ResourceLoader:
"""ResourceLoader objects are responsible for loading resources necessary for nlp components
(classifiers, entity recognizer, parsers, etc).
Note: we need to keep resource helpers as instance methods, as ``load_feature_resource``
assumes all helpers to be instance methods.
"""
def __init__(self, app_path, query_factory, query_cache=None):
self.app_path = app_path
self.query_factory = query_factory
# Example Layout: {
# 'entity_type': {
# 'entity_data': { # for gazetteer.txt
# 'loaded': '', # the time the file was last loaded
# 'modified': '', # the time the file was last modified
# 'data': contents of file
# },
# 'mapping': { # for mapping.json
# 'loaded': '', # the time the file was last loaded
# 'modified': '', # the time the file was last modified
# },
# 'gazetteer': { # for pickled gazetteer
# 'loaded': '', # the time the file was last loaded
# 'modified': '', # the time the file was last modified
# }
# }
# }
self._entity_files = {}
# Example layout: {
# 'domain': {
# 'intent': {
# 'filename': {
# 'modified': '', # the time the file was last modified
# 'loaded': '', # the time the file was last loaded
# 'queries': [], # the queries loaded from the file
# }
# }
# }
# }
self.file_to_query_info = {}
self._hasher = Hasher()
self._query_cache = query_cache
self._hash_to_model_path = None
@property
def query_cache(self):
"""
Lazy load the query cache since it's not required for inference.
"""
if not self._query_cache:
text_prep_hash = self.query_factory.text_preparation_pipeline.get_hashid()
self._query_cache = QueryCache(app_path=self.app_path,
schema_version_hash=text_prep_hash)
return self._query_cache
@property
def hash_to_model_path(self):
"""dict: A dictionary that maps hashes to the file path of the classifier."""
if self._hash_to_model_path is None:
self._load_cached_models()
return self._hash_to_model_path
def get_gazetteers(self, force_reload=False):
"""Gets gazetteers for all entities.
Returns:
dict: Gazetteer data keyed by entity type
"""
# TODO: get role gazetteers
entity_types = path.get_entity_types(self.app_path)
return {
entity_type: self.get_gazetteer(entity_type, force_reload=force_reload)
for entity_type in entity_types
}
def get_gazetteer(self, gaz_name, force_reload=False):
"""Gets a gazetteers by name.
Args:
gaz_name (str): The name of the entity the gazetteer corresponds to
Returns:
dict: Gazetteer data
"""
self._update_entity_file_dates(gaz_name)
# TODO: get role gazetteers
if self._gaz_needs_build(gaz_name) or force_reload:
self.build_gazetteer(gaz_name, force_reload=force_reload)
if self._entity_file_needs_load("gazetteer", gaz_name):
self.load_gazetteer(gaz_name)
return self._entity_files[gaz_name]["gazetteer"]["data"]
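# Illustrative usage (a sketch; "city" is a hypothetical entity type):
#
#     loader = ResourceLoader.create_resource_loader("my_app")
#     city_gaz = loader.get_gazetteer("city")  # dict of gazetteer data,
#     # rebuilt automatically if the entity files changed on disk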
def get_text_preparation_pipeline(self):
"""Get the tokenizer from the query_factory attribute
Returns:
text_preparation_pipeline (TextPreparationPipeline): Class responsible for
the normalization and tokenization of text.
"""
return self.query_factory.text_preparation_pipeline
@staticmethod
def get_sentiment_analyzer():
"""
Returns a sentiment analyzer, downloading the necessary lexicon data from nltk if it is not already present.
"""
try:
nltk.data.find("sentiment/vader_lexicon.zip")
except LookupError:
logger.info("Downloading lexicon for sentiment analysis")
nltk.download("vader_lexicon")
return SentimentIntensityAnalyzer()
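# Illustrative usage (a sketch): the returned object is NLTK's VADER
# SentimentIntensityAnalyzer, so its standard API applies.
#
#     analyzer = ResourceLoader.get_sentiment_analyzer()
#     scores = analyzer.polarity_scores("I love this product")
#     # scores is a dict with 'neg', 'neu', 'pos' and 'compound' keys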
def get_gazetteers_hash(self):
"""
Gets a single hash over all gazetteers, ordered alphabetically by entity type.
Returns:
str: Hash of a list of gazetteer hashes.
"""
entity_types = path.get_entity_types(self.app_path)
return self._hasher.hash_list(
(
self.get_gazetteer_hash(entity_type)
for entity_type in sorted(entity_types)
)
)
def get_gazetteer_hash(self, gaz_name):
"""
Gets the hash of a gazetteer by entity name.
Args:
gaz_name (str): The name of the entity the gazetteer corresponds to
Returns:
str: Hash of a gazetteer specified by name.
"""
self._update_entity_file_dates(gaz_name)
entity_data_path = path.get_entity_gaz_path(self.app_path, gaz_name)
entity_data_hash = self._hasher.hash_file(entity_data_path)
mapping_path = path.get_entity_map_path(self.app_path, gaz_name)
mapping_hash = self._hasher.hash_file(mapping_path)
return self._hasher.hash_list([entity_data_hash, mapping_hash])
def build_gazetteer(self, gaz_name, exclude_ngrams=False, force_reload=False):
"""Builds the specified gazetteer using the entity data and mapping files.
Args:
gaz_name (str): The name of the entity the gazetteer corresponds to
exclude_ngrams (bool, optional): Whether partial matches (n-grams) of
entities should be excluded from the gazetteer
force_reload (bool, optional): Whether file should be forcefully
reloaded from disk
"""
popularity_cutoff = 0.0
logger.info("Building gazetteer '%s'", gaz_name)
# TODO: support role gazetteers
gaz = Gazetteer(gaz_name, self.get_text_preparation_pipeline(), exclude_ngrams)
entity_data_path = path.get_entity_gaz_path(self.app_path, gaz_name)
gaz.update_with_entity_data_file(
entity_data_path, popularity_cutoff, self.query_factory.normalize
)
self._entity_files[gaz_name]["entity_data"]["loaded"] = time.time()
mapping = self.get_entity_map(gaz_name, force_reload=force_reload)
gaz.update_with_entity_map(
mapping.get("entities", []), self.query_factory.normalize
)
gaz_path = path.get_gazetteer_data_path(self.app_path, gaz_name)
gaz.dump(gaz_path)
self._entity_files[gaz_name]["gazetteer"]["data"] = gaz.to_dict()
self._entity_files[gaz_name]["gazetteer"]["loaded"] = time.time()
def load_gazetteer(self, gaz_name):
"""
Loads a gazetteer specified by the entity name.
Args:
gaz_name (str): The name of the entity the gazetteer corresponds to
"""
gaz = Gazetteer(gaz_name, self.get_text_preparation_pipeline())
gaz_path = path.get_gazetteer_data_path(self.app_path, gaz_name)
gaz.load(gaz_path)
self._entity_files[gaz_name]["gazetteer"]["data"] = gaz.to_dict()
self._entity_files[gaz_name]["gazetteer"]["loaded"] = time.time()
def get_entity_map(self, entity_type, force_reload=False):
"""Creates a mapping file for a given entity.
Args:
entity_type (str): The name of the entity
"""
self._update_entity_file_dates(entity_type)
if self._entity_file_needs_load("mapping", entity_type) or force_reload:
# file is out of date, load it
self.load_entity_map(entity_type)
return deepcopy(self._entity_files[entity_type]["mapping"]["data"])
def load_entity_map(self, entity_type):
"""Loads an entity mapping file.
Args:
entity_type (str): The name of the entity
"""
file_path = path.get_entity_map_path(self.app_path, entity_type)
logger.debug("Loading entity map from file '%s'", file_path)
if not os.path.isfile(file_path):
logger.warning("Entity map file not found at %s", file_path)
json_data = {}
else:
try:
with open(file_path, "r") as json_file:
json_data = json.load(json_file)
except json.JSONDecodeError as e:
raise MindMeldError(
"Could not load entity map (Invalid JSON): {!r}".format(file_path)
) from e
self._entity_files[entity_type]["mapping"]["data"] = json_data
self._entity_files[entity_type]["mapping"]["loaded"] = time.time()
def _load_cached_models(self):
if not self._hash_to_model_path:
self._hash_to_model_path = {}
cache_path = MODEL_CACHE_PATH.format(app_path=self.app_path)
for dir_path, _, file_names in os.walk(cache_path):
for filename in [f for f in file_names if f.endswith(".hash")]:
file_path = os.path.join(dir_path, filename)
with open(file_path, "r") as hash_file:
    hash_val = hash_file.read()
classifier_file_path = file_path.split(".hash")[0]
if not os.path.exists(classifier_file_path):
# In some cases a hash file exists without a corresponding serialized
# model, which implies that a model was probably not required (e.g. only
# one domain for a domain classifier, only one intent for an intent
# classifier, or no entity types for an entity recognizer)
msg = f"No serialized model available at {classifier_file_path}"
logger.info(msg)
self._hash_to_model_path[hash_val] = classifier_file_path
def _gaz_needs_build(self, gaz_name):
try:
build_time = self._entity_files[gaz_name]["gazetteer"]["modified"]
except KeyError:
# gazetteer hasn't been built
return True
# The key type of the pop_dict dict changed, so we check that its keys are tuples
pop_dict = self._entity_files[gaz_name]['gazetteer'].get('data', {}).get('pop_dict')
if pop_dict and not all(isinstance(elem, tuple) for elem in list(pop_dict.keys())):
return True
data_modified = self._entity_files[gaz_name]["entity_data"]["modified"]
mapping_modified = self._entity_files[gaz_name]["mapping"]["modified"]
# If entity data or mapping modified after gaz build -> stale
return build_time < data_modified or build_time < mapping_modified
def _entity_file_needs_load(self, file_type, entity_type):
mod_time = self._entity_files[entity_type][file_type]["modified"]
try:
load_time = self._entity_files[entity_type][file_type]["loaded"]
except KeyError:
# file hasn't been loaded
return True
return load_time < mod_time
def _update_entity_file_dates(self, entity_type):
# TODO: handle deleted entity
file_table = self._entity_files.get(
entity_type, {"entity_data": {}, "mapping": {}, "gazetteer": {}}
)
self._entity_files[entity_type] = file_table
# update entity data
entity_data_path = path.get_entity_gaz_path(self.app_path, entity_type)
try:
file_table["entity_data"]["modified"] = os.path.getmtime(entity_data_path)
except (OSError, IOError):
# required file doesn't exist -- warn and proceed with empty data
logger.warning(
"Entity data file not found at %r. "
"Proceeding with empty entity data.",
entity_data_path,
)
# mark the modified time as now if the entity file does not exist
# so that the gazetteer gets rebuilt properly.
file_table["entity_data"]["modified"] = time.time()
# update mapping
mapping_path = path.get_entity_map_path(self.app_path, entity_type)
try:
file_table["mapping"]["modified"] = os.path.getmtime(mapping_path)
except (OSError, IOError):
# required file doesn't exist -- warn and proceed with empty data
logger.warning(
"Entity mapping file not found at %r. "
"Proceeding with empty entity data.",
mapping_path,
)
# mark the modified time as now if the entity mapping file does not exist
# so that the gazetteer gets rebuilt properly.
file_table["mapping"]["modified"] = time.time()
# update gaz data
gazetteer_path = path.get_gazetteer_data_path(self.app_path, entity_type)
try:
file_table["gazetteer"]["modified"] = os.path.getmtime(gazetteer_path)
except (OSError, IOError):
# gaz not yet built so set to a time impossibly long ago
file_table["gazetteer"]["modified"] = 0.0
def get_labeled_queries(
self, domain=None, intent=None, label_set=None, force_reload=False
):
"""Gets labeled queries from the cache, or loads them from disk.
Args:
domain (str, optional): The domain of queries to load
intent (str, optional): The intent of queries to load
label_set (str, optional): The label set (file name pattern) of queries to load
force_reload (bool, optional): Reloads queries from disk when True, bypassing the cache
Returns:
dict: ProcessedQuery objects (or strings) loaded from labeled query files, organized by
domain and intent.
"""
label_set = label_set or DEFAULT_TRAIN_SET_REGEX
query_tree = {}
file_iter = self._traverse_labeled_queries_files(domain, intent, label_set)
for a_domain, an_intent, filename in file_iter:
file_info = self.file_to_query_info[filename]
if force_reload or (
not file_info["loaded"]
or file_info["loaded"] < file_info["modified"]
):
# file is out of date, load it
self.load_query_file(a_domain, an_intent, filename)
if a_domain not in query_tree:
query_tree[a_domain] = {}
if an_intent not in query_tree[a_domain]:
query_tree[a_domain][an_intent] = ProcessedQueryList(self.query_cache)
queries = query_tree[a_domain][an_intent]
queries.extend(file_info["query_ids"])
return query_tree
@staticmethod
def flatten_query_tree(query_tree):
"""
Takes a query tree and returns the elements in list form.
Args:
query_tree (dict): A nested dictionary that organizes queries by domain then intent.
Returns:
ProcessedQueryList: The queries from all domains and intents, flattened into a single list.
"""
flattened = ProcessedQueryList()
for _, intent_queries in query_tree.items():
for _, queries in intent_queries.items():
if not flattened.cache:
flattened.cache = queries.cache
elif flattened.cache != queries.cache:
logger.error("query_tree is built from incompatible query_caches")
raise ValueError("query_tree is built from incompatible query_caches")
flattened.extend(queries.elements)
return flattened
def get_flattened_label_set(
self, domain=None, intent=None, label_set=None, force_reload=False
):
return self.flatten_query_tree(
self.get_labeled_queries(domain, intent, label_set, force_reload)
)
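# Illustrative usage (a sketch; the app path and domain are hypothetical):
#
#     loader = ResourceLoader.create_resource_loader("my_app")
#     label_set = loader.get_flattened_label_set(domain="store_info")
#     for intent in label_set.intents():
#         ...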
def _traverse_labeled_queries_files(
self, domain=None, intent=None, file_pattern=DEFAULT_TRAIN_SET_REGEX
):
provided_intent = intent
query_tree = path.get_labeled_query_tree(self.app_path)
self._update_query_file_dates(query_tree)
domains = [domain] if domain else query_tree.keys()
for a_domain in sorted(domains):
if provided_intent:
intents = [provided_intent]
else:
intents = query_tree[a_domain].keys()
for an_intent in sorted(intents):
files = query_tree[a_domain][an_intent].keys()
# filter to files which belong to the label set
for file_path in sorted(files):
if re.match(file_pattern, os.path.basename(file_path)):
yield a_domain, an_intent, file_path
def get_all_file_paths(self, file_pattern=".*.txt"):
""" Get a list of text file paths across all intents.
Returns:
list: A list of all file paths.
"""
file_iter = self._traverse_labeled_queries_files(file_pattern=file_pattern)
return [filename for _, _, filename in file_iter]
def filter_file_paths(self, compiled_pattern, file_paths=None):
""" Get a list of file paths that match a specific file_pattern
Args:
compiled_pattern (re.Pattern): A compiled regex pattern to filter with.
file_paths (list): A list of file paths.
Returns:
list: A list of file paths.
"""
all_file_paths = file_paths or self.get_all_file_paths()
matched_paths = []
for file_path in all_file_paths:
m = compiled_pattern.match(file_path)
if m:
matched_paths.append(m.group())
if len(matched_paths) == 0:
logger.warning("No matches were found for the given compiled pattern")
return matched_paths
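# Illustrative usage (a sketch; the pattern is an example, not a naming
# convention this module mandates):
#
#     pattern = re.compile(r".*train.*\.txt$")
#     train_files = loader.filter_file_paths(pattern)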
def load_query_file(self, domain, intent, file_path):
"""Loads the queries from the specified file.
Args:
domain (str): The domain of the query file
intent (str): The intent of the query file
file_path (str): The path of the query file
"""
logger.info("Loading queries from file %s", file_path)
file_data = self.file_to_query_info[file_path]
query_ids = markup.cache_query_file(
file_path,
self.query_cache,
self.query_factory,
self.app_path,
domain,
intent,
is_gold=True
)
for _id in query_ids:
try:
self._check_query_entities(self.query_cache.get(_id))
except MindMeldError as exc:
logger.warning(exc.message)
file_data["query_ids"] = query_ids
file_data["loaded"] = time.time()
def _check_query_entities(self, query):
entity_types = path.get_entity_types(self.app_path)
for entity in query.entities:
if (
entity.entity.type not in entity_types
and not entity.entity.is_system_entity
):
msg = "Unknown entity {!r} found in query {!r}"
raise MindMeldError(
msg.format(entity.entity.type, query.query.text)
)
def _update_query_file_dates(self, query_tree):
# Build a fresh file table; if this is the first check we can use it directly
new_query_files = {}
for domain in query_tree:
for intent in query_tree[domain]:
for filename in query_tree[domain][intent]:
# filename needs to be full path
new_query_files[filename] = {
"modified": query_tree[domain][intent][filename],
"loaded": None,
}
all_filenames = set(new_query_files.keys())
all_filenames.update(self.file_to_query_info.keys())
for filename in all_filenames:
try:
new_file_info = new_query_files[filename]
except KeyError:
# an old file that was removed
del self.file_to_query_info[filename]
continue
try:
old_file_info = self.file_to_query_info[filename]
except KeyError:
# a new file
self.file_to_query_info[filename] = new_file_info
continue
# file existed before and now -> update
old_file_info["modified"] = new_file_info["modified"]
class WordFreqBuilder:
"""
Compiles a unigram frequency dictionary of normalized query tokens
"""
def __init__(self, enable_stemming=False):
self.enable_stemming = enable_stemming
self.tokens = []
def add(self, query):
for i in range(len(query.normalized_tokens)):
tok = query.normalized_tokens[i]
self.tokens.append(mask_numerics(tok))
if self.enable_stemming:
# We only add stemmed tokens that are not the same
# as the original token to reduce the impact on
# word frequencies
stemmed_tok = query.stemmed_tokens[i]
if stemmed_tok != tok:
self.tokens.append(mask_numerics(stemmed_tok))
def get_resource(self):
return Counter(self.tokens)
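# Illustrative usage (a sketch; `queries` is assumed to be an iterable of
# Query objects, e.g. ProcessedQueryList.queries()):
#
#     builder = WordFreqBuilder(enable_stemming=True)
#     for query in queries:
#         builder.add(query)
#     word_freqs = builder.get_resource()  # a collections.Counter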
class CharNgramFreqBuilder:
"""
Compiles a character n-gram frequency dictionary of normalized query tokens
"""
def __init__(self, lengths, thresholds):
self.lengths = lengths
self.thresholds = list(thresholds) + [1] * (len(lengths) - len(thresholds))
self.char_freq_dict = Counter()
def add(self, query):
for length, threshold in zip(self.lengths, self.thresholds):
# Minimum value of thresholds must be one to include all ngrams in train files
# i.e., no ngram in our dictionary can occur less than once in our training data
if threshold < 1:
logger.warning(
"Threshold value of n-grams cannot be less than 1."
"Resetting to 1 to extract all character ngrams in the domain",
)
query_text = re.sub(r"\d", "0", query.normalized_text)
character_tokens = [
query_text[i: i + length]
for i in range(len(query_text))
if len(query_text[i: i + length]) == length
]
self.char_freq_dict.update(character_tokens)
def get_resource(self):
return self.char_freq_dict
class WordNgramFreqBuilder:
"""
Compiles a word n-gram frequency dictionary of normalized query tokens
"""
def __init__(self, lengths, thresholds, enable_stemming=False):
self.lengths = lengths
self.thresholds = list(thresholds) + [1] * (len(lengths) - len(thresholds))
self.enable_stemming = enable_stemming
self.word_freq_dict = Counter()
def add(self, query):
for length, threshold in zip(self.lengths, self.thresholds):
# Minimum value of thresholds must be one to include all ngrams in train files
# i.e., no ngram in our dictionary can occur less than once in our training data
if threshold < 1:
logger.warning(
"Threshold value of n-grams cannot be less than 1."
"Resetting to 1 to extract all ngrams in the domain",
)
ngram_tokens = []
# Adding OOB token for entity bow feature extractor
normalized_tokens = [OUT_OF_BOUNDS_TOKEN] + \
[re.sub(r"\d", "0", tok) for tok in query.normalized_tokens] + \
[OUT_OF_BOUNDS_TOKEN]
if self.enable_stemming:
stemmed_tokens = [OUT_OF_BOUNDS_TOKEN] + \
[re.sub(r"\d", "0", tok) for tok in query.stemmed_tokens] + \
[OUT_OF_BOUNDS_TOKEN]
for i in range(len(normalized_tokens)):
ngram_query = " ".join(normalized_tokens[i: i + length])
ngram_tokens.append(ngram_query)
if self.enable_stemming:
stemmed_ngram_query = " ".join(stemmed_tokens[i: i + length])
if stemmed_ngram_query != ngram_query:
ngram_tokens.append(stemmed_ngram_query)
self.word_freq_dict.update(ngram_tokens)
def get_resource(self):
return self.word_freq_dict
class QueryFreqBuilder:
"""
Compiles a frequency dictionary of normalized and stemmed query strings
"""
def __init__(self, enable_stemming=False):
self.enable_stemming = enable_stemming
self.query_dict = Counter()
self.stemmed_query_dict = Counter()
def add(self, query):
self.query_dict.update(["<{}>".format(query.normalized_text)])
if self.enable_stemming:
self.stemmed_query_dict.update(["<{}>".format(query.stemmed_text)])
def get_resource(self):
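# Queries seen fewer than twice are zeroed out so one-off queries do not
# count toward the frequency resource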
for query in self.query_dict:
if self.query_dict[query] < 2:
self.query_dict[query] = 0
if self.enable_stemming:
for query in self.stemmed_query_dict:
if self.stemmed_query_dict[query] < 2:
self.stemmed_query_dict[query] = 0
self.query_dict += self.stemmed_query_dict
return self.query_dict
def get_sys_entity_types(self, labels): # pylint: disable=no-self-use
"""Get all system entity types from the entity labels.
Args:
labels (list of QueryEntity): a list of labeled entities
"""
# Build entity types set
entity_types = set()
for label in labels:
for entity in label:
entity_types.add(entity.entity.type)
return set((t for t in entity_types if Entity.is_system_entity(t)))
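# Illustrative: given labels whose entities have the types "sys_time" and
# "store_name" (hypothetical names), only the system types survive:
#
#     loader.get_sys_entity_types(labels)  # -> {"sys_time"}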
def hash_feature_resource(self, name):
"""Hashes the named resource.
Args:
name (str): The name of the resource to hash
Returns:
str: The hash result
"""
hash_func = self.RSC_HASH_MAP.get(name)
if hash_func:
return hash_func(self)
else:
raise ValueError("Invalid resource name {!r}.".format(name))
def hash_string(self, string):
"""Hashes a string.
Args:
string (str): The string to hash
Returns:
str: The hash result
"""
return self._hasher.hash(string)
def hash_list(self, items):
"""Hashes the list of items.
Args:
items (list[str]): A list of strings to hash
Returns:
str: The hash result
"""
return self._hasher.hash_list(items)
@staticmethod
def create_resource_loader(app_path, query_factory=None, text_preparation_pipeline=None):
"""Creates the resource loader for the app at app path.
Args:
app_path (str): The path to the directory containing the app's data
query_factory (QueryFactory): The app's query factory
text_preparation_pipeline (TextPreparationPipeline): The app's text preparation
pipeline.
Returns:
ResourceLoader: a resource loader
"""
query_factory = query_factory or QueryFactory.create_query_factory(
app_path, text_preparation_pipeline=text_preparation_pipeline
)
return ResourceLoader(app_path, query_factory)
RSC_HASH_MAP = {
GAZETTEER_RSC: get_gazetteers_hash,
# the following resources only vary based on the queries used for training
WORD_FREQ_RSC: lambda _: "constant",
WORD_NGRAM_FREQ_RSC: lambda _: "constant",
CHAR_NGRAM_FREQ_RSC: lambda _: "constant",
QUERY_FREQ_RSC: lambda _: "constant",
SYS_TYPES_RSC: lambda _: "constant",
ENABLE_STEMMING: lambda _: "constant",
SENTIMENT_ANALYZER: lambda _: "constant",
}
class Hasher:
"""An thin wrapper around hashlib. Uses cache for commonly hashed strings.
Attributes:
algorithm (str): The hashing algorithm to use. Defaults to
'sha1'. See `hashlib.algorithms_available` for a list of
options.
"""
def __init__(self, algorithm="sha1"):
# TODO: consider an alternative data structure so the cache doesn't get too big
self._cache = {}
# intentionally using the setter to check value
self._algorithm = None
self._set_algorithm(algorithm)
def _get_algorithm(self):
"""Getter for algorithm property.
Returns:
str: the hashing algorithm
"""
return self._algorithm
def _set_algorithm(self, value):
"""Setter for algorithm property.
Args:
value (str): The hashing algorithm to use. Defaults
to 'sha1'. See `hashlib.algorithms_available` for a list of
options.
"""
if value not in hashlib.algorithms_available:
raise ValueError("Invalid hashing algorithm: {!r}".format(value))
if value != self._algorithm:
# reset cache when changing algorithm
self._cache = {}
self._algorithm = value
algorithm = property(_get_algorithm, _set_algorithm)
def hash(self, string):
"""Hashes a string.
Args:
string (str): The string to hash
Returns:
str: The hash result
"""
if string in self._cache:
return self._cache[string]
hash_obj = hashlib.new(self.algorithm)
hash_obj.update(string.encode("utf8"))
result = hash_obj.hexdigest()
self._cache[string] = result
return result
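# Illustrative usage (a sketch):
#
#     hasher = Hasher(algorithm="sha256")
#     digest = hasher.hash("hello")  # hex digest string, cached on repeat calls
#     combined = hasher.hash_list(["a", "b"])  # single hash over the item hashes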