import re
from functools import reduce

import numpy as np
from keras.preprocessing.sequence import pad_sequences


def tokenize(sent):
    '''Return the tokens of a sentence, including punctuation.'''
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]


def parse_stories(lines, only_supporting=False):
    '''Parse stories provided in the bAbI tasks format.

    If only_supporting is True, keep only the sentences that support
    the answer.
    '''
    data = []
    story = []
    for line in lines:
        line = line.decode('utf-8').strip()
        nid, line = line.split(' ', 1)
        nid = int(nid)
        if nid == 1:
            # A sentence id of 1 marks the start of a new story.
            story = []
        if '\t' in line:
            # Question lines hold the question, the answer, and the ids
            # of the supporting facts, separated by tabs.
            q, a, supporting = line.split('\t')
            q = tokenize(q)
            if only_supporting:
                # Keep only the sentences referenced by the supporting facts.
                supporting = map(int, supporting.split())
                substory = [story[i - 1] for i in supporting]
            else:
                # Provide the whole story so far.
                substory = [x for x in story if x]
            data.append((substory, q, a))
            # Keep the question's slot in the story so that supporting-fact
            # indices stay aligned.
            story.append('')
        else:
            sent = tokenize(line)
            story.append(sent)
    return data


def get_stories(f):
    '''Read a bAbI task file, parse it, and flatten each story into a
    single token list.'''
    with open(f, 'rb') as infile:
        data = parse_stories(infile.readlines())
    flatten = lambda data: reduce(lambda x, y: x + y, data)
    return [(flatten(story), q, answer) for story, q, answer in data]


def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
    '''Convert stories, queries, and answers into padded index arrays.'''
    xs = []
    xqs = []
    ys = []
    for story, query, answer in data:
        x = [word_idx[w] for w in story]
        xq = [word_idx[w] for w in query]
        # One-hot answer vector; index 0 is reserved for padding,
        # hence len(word_idx) + 1 classes.
        y = np.zeros(len(word_idx) + 1)
        y[word_idx[answer]] = 1
        xs.append(x)
        xqs.append(xq)
        ys.append(y)
    return (pad_sequences(xs, maxlen=story_maxlen),
            pad_sequences(xqs, maxlen=query_maxlen),
            np.array(ys))
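
# Minimal usage sketch tying the pipeline together: load a task file, build
# the vocabulary and index mapping, then vectorize. The file path below is a
# placeholder following the bAbI dataset's naming convention; adjust it to
# wherever the downloaded task files live.
if __name__ == '__main__':
    train = get_stories('tasks_1-20_v1-2/en/qa1_single-supporting-fact_train.txt')

    # Vocabulary over all story, question, and answer tokens.
    vocab = sorted(set(w for story, q, answer in train
                       for w in story + q + [answer]))
    # Reserve index 0 for padding.
    word_idx = {w: i + 1 for i, w in enumerate(vocab)}
    story_maxlen = max(len(story) for story, _, _ in train)
    query_maxlen = max(len(q) for _, q, _ in train)

    x, xq, y = vectorize_stories(train, word_idx, story_maxlen, query_maxlen)
    print('stories:', x.shape, 'queries:', xq.shape, 'answers:', y.shape)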