言語処理100本ノック 2020「47. 機能動詞構文のマイニング」
問題文
問題の概要
問題文に提示された仕様に従って出力します。
class Morph:
    """One morpheme (a single MeCab line inside a CaboCha parse)."""

    def __init__(self, dc):
        self.surface = dc['surface']  # surface form as written in the text
        self.base = dc['base']        # base (dictionary) form
        self.pos = dc['pos']          # part of speech
        self.pos1 = dc['pos1']        # part-of-speech subdivision


class Chunk:
    """One bunsetsu (phrase chunk): its morphemes plus dependency links."""

    def __init__(self, morphs, dst):
        self.morphs = morphs  # list of Morph objects in this chunk
        self.dst = dst        # index (str) of the chunk this one depends on; '-1' = root
        self.srcs = []        # indices of the chunks that depend on this one


def parse_cabocha(block):
    """Parse one CaboCha sentence block into a list of Chunk objects.

    A block is a sequence of '* <idx> <dst>D ...' chunk headers, each
    followed by tab-separated morpheme lines; an empty line (or the end
    of the block) terminates the sentence.
    """
    def flush(pending):
        # Close the chunk collected so far (if any) under the current dst.
        if pending:
            res.append(Chunk(pending, dst))
            pending = []
        return pending

    res = []
    pending = []
    dst = None
    for line in block.split('\n'):
        if line == '':
            pending = flush(pending)
        elif line[0] == '*':
            # BUGFIX: flush the previous chunk BEFORE overwriting dst; the
            # original assigned the new header's dst first, so every chunk
            # was stored with the *next* chunk's dependency target.
            pending = flush(pending)
            dst = line.split(' ')[2].rstrip('D')
        else:
            surface, attr = line.split('\t')
            attr = attr.split(',')
            pending.append(Morph({
                'surface': surface,
                'base': attr[6],
                'pos': attr[0],
                'pos1': attr[1],
            }))
    pending = flush(pending)  # robustness: block without a trailing empty line
    for i, chunk in enumerate(res):
        # BUGFIX: '-1' marks the root.  The original indexed res[-1] with it,
        # wrongly adding every root chunk to the last chunk's srcs list.
        if int(chunk.dst) != -1:
            res[int(chunk.dst)].srcs.append(i)
    return res


def main():
    """Q47: mine functional verb constructions (サ変名詞+を+動詞) into ch05/ans47.txt."""
    filename = 'ch05/ai.ja.txt.cabocha'
    with open(filename, mode='rt', encoding='utf-8') as f:
        blocks = f.read().split('EOS\n')
    blocks = [b for b in blocks if b != '']
    blocks = [parse_cabocha(block) for block in blocks]
    # encoding added so the output file is UTF-8 regardless of platform locale
    # (the input is read as UTF-8; the output should match).
    with open('ch05/ans47.txt', mode='w', encoding='utf-8') as f:
        for b in blocks:
            for i, chunk in enumerate(b):
                pos1s = [mo.pos1 for mo in chunk.morphs]
                surfaces = [mo.surface for mo in chunk.morphs]
                # NOTE(review): membership test only — does not verify that
                # サ変接続 and を are adjacent within the chunk (same as the
                # original logic); confirm against the problem's expected output.
                if ('サ変接続' in pos1s and 'を' in surfaces
                        and i + 1 < len(b) and b[i + 1].morphs[0].pos == '動詞'):
                    # predicate = this chunk's text + base form of the next chunk's verb
                    text = ''.join(surfaces) + b[i + 1].morphs[0].base
                    if chunk.srcs:
                        pre_morphs = [b[int(s)].morphs for s in chunk.srcs]
                        # first particle of each dependent chunk (if it has one)
                        particles = [[mo for mo in pm if '助詞' in mo.pos] for pm in pre_morphs]
                        pre_surface = [pm[0].surface for pm in particles if pm]
                        # full text of every dependent chunk containing a particle
                        pre_text = [''.join(mo.surface for mo in pm)
                                    for pm in pre_morphs
                                    if '助詞' in [mo.pos for mo in pm]]
                        if pre_surface:
                            # write(), not writelines(): we emit one joined string
                            f.write('\t'.join([text, ' '.join(pre_surface), ' '.join(pre_text)]))
                            f.write('\n')


if __name__ == '__main__':
    main()
言語処理100本ノック 2020「46. 動詞の格フレーム情報の抽出」
問題文
問題の概要
問題文に提示された仕様に従って出力します。
class Morph:
    """One morpheme (a single MeCab line inside a CaboCha parse)."""

    def __init__(self, dc):
        self.surface = dc['surface']  # surface form as written in the text
        self.base = dc['base']        # base (dictionary) form
        self.pos = dc['pos']          # part of speech
        self.pos1 = dc['pos1']        # part-of-speech subdivision


class Chunk:
    """One bunsetsu (phrase chunk): its morphemes plus dependency links."""

    def __init__(self, morphs, dst):
        self.morphs = morphs  # list of Morph objects in this chunk
        self.dst = dst        # index (str) of the chunk this one depends on; '-1' = root
        self.srcs = []        # indices of the chunks that depend on this one


def parse_cabocha(block):
    """Parse one CaboCha sentence block into a list of Chunk objects.

    A block is a sequence of '* <idx> <dst>D ...' chunk headers, each
    followed by tab-separated morpheme lines; an empty line (or the end
    of the block) terminates the sentence.
    """
    def flush(pending):
        # Close the chunk collected so far (if any) under the current dst.
        if pending:
            res.append(Chunk(pending, dst))
            pending = []
        return pending

    res = []
    pending = []
    dst = None
    for line in block.split('\n'):
        if line == '':
            pending = flush(pending)
        elif line[0] == '*':
            # BUGFIX: flush the previous chunk BEFORE overwriting dst; the
            # original assigned the new header's dst first, so every chunk
            # was stored with the *next* chunk's dependency target.
            pending = flush(pending)
            dst = line.split(' ')[2].rstrip('D')
        else:
            surface, attr = line.split('\t')
            attr = attr.split(',')
            pending.append(Morph({
                'surface': surface,
                'base': attr[6],
                'pos': attr[0],
                'pos1': attr[1],
            }))
    pending = flush(pending)  # robustness: block without a trailing empty line
    for i, chunk in enumerate(res):
        # BUGFIX: '-1' marks the root.  The original indexed res[-1] with it,
        # wrongly adding every root chunk to the last chunk's srcs list.
        if int(chunk.dst) != -1:
            res[int(chunk.dst)].srcs.append(i)
    return res


def main():
    """Q46: print verb case frames — verb base, particles, and the dependent chunks' text."""
    filename = 'ch05/ai.ja.txt.cabocha'
    with open(filename, mode='rt', encoding='utf-8') as f:
        blocks = f.read().split('EOS\n')
    blocks = [b for b in blocks if b != '']
    blocks = [parse_cabocha(block) for block in blocks]
    for b in blocks:
        for chunk in b:
            if not chunk.srcs:
                continue
            pre_morphs = [b[int(s)].morphs for s in chunk.srcs]
            # first particle of each dependent chunk (if it has one)
            particles = [[mo for mo in pm if '助詞' in mo.pos] for pm in pre_morphs]
            pre_surface = [pm[0].surface for pm in particles if pm]
            post_base = [mo.base for mo in chunk.morphs]
            post_pos = [mo.pos for mo in chunk.morphs]
            if pre_surface and '動詞' in post_pos:
                # full text of every dependent chunk containing a particle
                pre_text = [''.join(mo.surface for mo in pm)
                            for pm in pre_morphs
                            if '助詞' in [mo.pos for mo in pm]]
                print(post_base[0], ' '.join(pre_surface), ' '.join(pre_text), sep='\t')


if __name__ == '__main__':
    main()
言語処理100本ノック 2020「45. 動詞の格パターンの抽出」
問題文
問題の概要
問題文に提示された仕様に従って出力します。
class Morph:
    """One morpheme (a single MeCab line inside a CaboCha parse)."""

    def __init__(self, dc):
        self.surface = dc['surface']  # surface form as written in the text
        self.base = dc['base']        # base (dictionary) form
        self.pos = dc['pos']          # part of speech
        self.pos1 = dc['pos1']        # part-of-speech subdivision


class Chunk:
    """One bunsetsu (phrase chunk): its morphemes plus dependency links."""

    def __init__(self, morphs, dst):
        self.morphs = morphs  # list of Morph objects in this chunk
        self.dst = dst        # index (str) of the chunk this one depends on; '-1' = root
        self.srcs = []        # indices of the chunks that depend on this one


def parse_cabocha(block):
    """Parse one CaboCha sentence block into a list of Chunk objects.

    A block is a sequence of '* <idx> <dst>D ...' chunk headers, each
    followed by tab-separated morpheme lines; an empty line (or the end
    of the block) terminates the sentence.
    """
    def flush(pending):
        # Close the chunk collected so far (if any) under the current dst.
        if pending:
            res.append(Chunk(pending, dst))
            pending = []
        return pending

    res = []
    pending = []
    dst = None
    for line in block.split('\n'):
        if line == '':
            pending = flush(pending)
        elif line[0] == '*':
            # BUGFIX: flush the previous chunk BEFORE overwriting dst; the
            # original assigned the new header's dst first, so every chunk
            # was stored with the *next* chunk's dependency target.
            pending = flush(pending)
            dst = line.split(' ')[2].rstrip('D')
        else:
            surface, attr = line.split('\t')
            attr = attr.split(',')
            pending.append(Morph({
                'surface': surface,
                'base': attr[6],
                'pos': attr[0],
                'pos1': attr[1],
            }))
    pending = flush(pending)  # robustness: block without a trailing empty line
    for i, chunk in enumerate(res):
        # BUGFIX: '-1' marks the root.  The original indexed res[-1] with it,
        # wrongly adding every root chunk to the last chunk's srcs list.
        if int(chunk.dst) != -1:
            res[int(chunk.dst)].srcs.append(i)
    return res


def main():
    """Q45: print verb case patterns — verb base plus the particles of its dependents."""
    filename = 'ch05/ai.ja.txt.cabocha'
    with open(filename, mode='rt', encoding='utf-8') as f:
        blocks = f.read().split('EOS\n')
    blocks = [b for b in blocks if b != '']
    blocks = [parse_cabocha(block) for block in blocks]
    for b in blocks:
        for chunk in b:
            if not chunk.srcs:
                continue
            pre_morphs = [b[int(s)].morphs for s in chunk.srcs]
            # first particle of each dependent chunk (if it has one)
            particles = [[mo for mo in pm if '助詞' in mo.pos] for pm in pre_morphs]
            pre_surface = [pm[0].surface for pm in particles if pm]
            post_base = [mo.base for mo in chunk.morphs]
            post_pos = [mo.pos for mo in chunk.morphs]
            if pre_surface and '動詞' in post_pos:
                # NOTE(review): prints morphs[0].base, not the verb's base,
                # whenever the verb is not chunk-initial (same as the original).
                print(post_base[0], ' '.join(pre_surface), sep='\t')


if __name__ == '__main__':
    main()
言語処理100本ノック 2020「44. 係り受け木の可視化」
問題文
問題の概要
問題文にある通り、pydotを用いて有向グラフを可視化します。pairs
というリストに、係り受け関係にある文節のペア(係り元と係り先)を格納し、pydot.graph_from_edges(pairs)
を呼び出します。
class Morph:
    """One morpheme (a single MeCab line inside a CaboCha parse)."""

    def __init__(self, dc):
        self.surface = dc['surface']  # surface form as written in the text
        self.base = dc['base']        # base (dictionary) form
        self.pos = dc['pos']          # part of speech
        self.pos1 = dc['pos1']        # part-of-speech subdivision


class Chunk:
    """One bunsetsu (phrase chunk): its morphemes plus dependency links."""

    def __init__(self, morphs, dst):
        self.morphs = morphs  # list of Morph objects in this chunk
        self.dst = dst        # index (str) of the chunk this one depends on; '-1' = root
        self.srcs = []        # indices of the chunks that depend on this one


def parse_cabocha(block):
    """Parse one CaboCha sentence block into a list of Chunk objects.

    A block is a sequence of '* <idx> <dst>D ...' chunk headers, each
    followed by tab-separated morpheme lines; an empty line (or the end
    of the block) terminates the sentence.
    """
    def flush(pending):
        # Close the chunk collected so far (if any) under the current dst.
        if pending:
            res.append(Chunk(pending, dst))
            pending = []
        return pending

    res = []
    pending = []
    dst = None
    for line in block.split('\n'):
        if line == '':
            pending = flush(pending)
        elif line[0] == '*':
            # BUGFIX: flush the previous chunk BEFORE overwriting dst; the
            # original assigned the new header's dst first, so every chunk
            # was stored with the *next* chunk's dependency target.
            pending = flush(pending)
            dst = line.split(' ')[2].rstrip('D')
        else:
            surface, attr = line.split('\t')
            attr = attr.split(',')
            pending.append(Morph({
                'surface': surface,
                'base': attr[6],
                'pos': attr[0],
                'pos1': attr[1],
            }))
    pending = flush(pending)  # robustness: block without a trailing empty line
    for i, chunk in enumerate(res):
        # BUGFIX: '-1' marks the root.  The original indexed res[-1] with it,
        # wrongly adding every root chunk to the last chunk's srcs list.
        if int(chunk.dst) != -1:
            res[int(chunk.dst)].srcs.append(i)
    return res


def main():
    """Q44: render sentence 7's dependency tree to ch05/ans44.png via pydot."""
    # Imported lazily so parse_cabocha remains importable (and testable)
    # in environments where the third-party pydot package is not installed.
    import pydot

    filename = 'ch05/ai.ja.txt.cabocha'
    with open(filename, mode='rt', encoding='utf-8') as f:
        blocks = f.read().split('EOS\n')
    blocks = [b for b in blocks if b != '']
    blocks = [parse_cabocha(block) for block in blocks]
    pairs = []
    target = blocks[7]
    for chunk in target:
        if int(chunk.dst) > -1:
            # dependent -> head edge; symbols (記号) are stripped from labels
            pre_text = ''.join(mo.surface for mo in chunk.morphs if mo.pos != '記号')
            post_text = ''.join(mo.surface
                                for mo in target[int(chunk.dst)].morphs
                                if mo.pos != '記号')
            pairs.append([pre_text, post_text])
    print(pairs)
    g = pydot.graph_from_edges(pairs)
    g.write_png('ch05/ans44.png', prog='dot')


if __name__ == '__main__':
    main()
言語処理100本ノック 2020「43. 名詞を含む文節が動詞を含む文節に係るものを抽出」
問題文
問題の概要
「42. 係り元と係り先の文節の表示」に「名詞を含む文節が,動詞を含む文節に係るとき」の条件を付与します。
class Morph:
    """One morpheme (a single MeCab line inside a CaboCha parse)."""

    def __init__(self, dc):
        self.surface = dc['surface']  # surface form as written in the text
        self.base = dc['base']        # base (dictionary) form
        self.pos = dc['pos']          # part of speech
        self.pos1 = dc['pos1']        # part-of-speech subdivision


class Chunk:
    """One bunsetsu (phrase chunk): its morphemes plus dependency links."""

    def __init__(self, morphs, dst):
        self.morphs = morphs  # list of Morph objects in this chunk
        self.dst = dst        # index (str) of the chunk this one depends on; '-1' = root
        self.srcs = []        # indices of the chunks that depend on this one


def parse_cabocha(block):
    """Parse one CaboCha sentence block into a list of Chunk objects.

    A block is a sequence of '* <idx> <dst>D ...' chunk headers, each
    followed by tab-separated morpheme lines; an empty line (or the end
    of the block) terminates the sentence.
    """
    def flush(pending):
        # Close the chunk collected so far (if any) under the current dst.
        if pending:
            res.append(Chunk(pending, dst))
            pending = []
        return pending

    res = []
    pending = []
    dst = None
    for line in block.split('\n'):
        if line == '':
            pending = flush(pending)
        elif line[0] == '*':
            # BUGFIX: flush the previous chunk BEFORE overwriting dst; the
            # original assigned the new header's dst first, so every chunk
            # was stored with the *next* chunk's dependency target.
            pending = flush(pending)
            dst = line.split(' ')[2].rstrip('D')
        else:
            surface, attr = line.split('\t')
            attr = attr.split(',')
            pending.append(Morph({
                'surface': surface,
                'base': attr[6],
                'pos': attr[0],
                'pos1': attr[1],
            }))
    pending = flush(pending)  # robustness: block without a trailing empty line
    for i, chunk in enumerate(res):
        # BUGFIX: '-1' marks the root.  The original indexed res[-1] with it,
        # wrongly adding every root chunk to the last chunk's srcs list.
        if int(chunk.dst) != -1:
            res[int(chunk.dst)].srcs.append(i)
    return res


def main():
    """Q43: print dependent/head pairs where a noun chunk depends on a verb chunk."""
    filename = 'ch05/ai.ja.txt.cabocha'
    with open(filename, mode='rt', encoding='utf-8') as f:
        blocks = f.read().split('EOS\n')
    blocks = [b for b in blocks if b != '']
    blocks = [parse_cabocha(block) for block in blocks]
    for b in blocks:
        for chunk in b:
            if int(chunk.dst) <= -1:
                continue
            head_morphs = b[int(chunk.dst)].morphs
            # symbols (記号) are dropped from the displayed text only
            pre_text = ''.join(mo.surface for mo in chunk.morphs if mo.pos != '記号')
            post_text = ''.join(mo.surface for mo in head_morphs if mo.pos != '記号')
            pre_pos = [mo.pos for mo in chunk.morphs]
            post_pos = [mo.pos for mo in head_morphs]
            if '名詞' in pre_pos and '動詞' in post_pos:
                print(pre_text, post_text, sep='\t')


if __name__ == '__main__':
    main()
言語処理100本ノック 2020「42. 係り元と係り先の文節の表示」
問題文
問題の概要
「41. 係り受け解析結果の読み込み(文節・係り受け)」を活用し、全ての係り受け関係を洗い出します。結合時は、品詞が記号の際には空文字列に置換しています。
class Morph:
    """One morpheme (a single MeCab line inside a CaboCha parse)."""

    def __init__(self, dc):
        self.surface = dc['surface']  # surface form as written in the text
        self.base = dc['base']        # base (dictionary) form
        self.pos = dc['pos']          # part of speech
        self.pos1 = dc['pos1']        # part-of-speech subdivision


class Chunk:
    """One bunsetsu (phrase chunk): its morphemes plus dependency links."""

    def __init__(self, morphs, dst):
        self.morphs = morphs  # list of Morph objects in this chunk
        self.dst = dst        # index (str) of the chunk this one depends on; '-1' = root
        self.srcs = []        # indices of the chunks that depend on this one


def parse_cabocha(block):
    """Parse one CaboCha sentence block into a list of Chunk objects.

    A block is a sequence of '* <idx> <dst>D ...' chunk headers, each
    followed by tab-separated morpheme lines; an empty line (or the end
    of the block) terminates the sentence.
    """
    def flush(pending):
        # Close the chunk collected so far (if any) under the current dst.
        if pending:
            res.append(Chunk(pending, dst))
            pending = []
        return pending

    res = []
    pending = []
    dst = None
    for line in block.split('\n'):
        if line == '':
            pending = flush(pending)
        elif line[0] == '*':
            # BUGFIX: flush the previous chunk BEFORE overwriting dst; the
            # original assigned the new header's dst first, so every chunk
            # was stored with the *next* chunk's dependency target.
            pending = flush(pending)
            dst = line.split(' ')[2].rstrip('D')
        else:
            surface, attr = line.split('\t')
            attr = attr.split(',')
            pending.append(Morph({
                'surface': surface,
                'base': attr[6],
                'pos': attr[0],
                'pos1': attr[1],
            }))
    pending = flush(pending)  # robustness: block without a trailing empty line
    for i, chunk in enumerate(res):
        # BUGFIX: '-1' marks the root.  The original indexed res[-1] with it,
        # wrongly adding every root chunk to the last chunk's srcs list.
        if int(chunk.dst) != -1:
            res[int(chunk.dst)].srcs.append(i)
    return res


def main():
    """Q42: print every dependent/head chunk pair, tab-separated, symbols removed."""
    filename = 'ch05/ai.ja.txt.cabocha'
    with open(filename, mode='rt', encoding='utf-8') as f:
        blocks = f.read().split('EOS\n')
    blocks = [b for b in blocks if b != '']
    blocks = [parse_cabocha(block) for block in blocks]
    for b in blocks:
        for chunk in b:
            if int(chunk.dst) > -1:
                # symbols (記号) are dropped from the displayed text only
                pre_text = ''.join(mo.surface for mo in chunk.morphs if mo.pos != '記号')
                post_text = ''.join(mo.surface
                                    for mo in b[int(chunk.dst)].morphs
                                    if mo.pos != '記号')
                print(pre_text, post_text, sep='\t')


if __name__ == '__main__':
    main()
言語処理100本ノック 2020「41. 係り受け解析結果の読み込み(文節・係り受け)」
問題文
問題の概要
問題文の指示通り、文節を表すクラス Chunk
を実装します。
class Morph:
    """One morpheme (a single MeCab line inside a CaboCha parse)."""

    def __init__(self, dc):
        self.surface = dc['surface']  # surface form as written in the text
        self.base = dc['base']        # base (dictionary) form
        self.pos = dc['pos']          # part of speech
        self.pos1 = dc['pos1']        # part-of-speech subdivision


class Chunk:
    """One bunsetsu (phrase chunk): its morphemes plus dependency links."""

    def __init__(self, morphs, dst):
        self.morphs = morphs  # list of Morph objects in this chunk
        self.dst = dst        # index (str) of the chunk this one depends on; '-1' = root
        self.srcs = []        # indices of the chunks that depend on this one


def parse_cabocha(block):
    """Parse one CaboCha sentence block into a list of Chunk objects.

    A block is a sequence of '* <idx> <dst>D ...' chunk headers, each
    followed by tab-separated morpheme lines; an empty line (or the end
    of the block) terminates the sentence.
    """
    def flush(pending):
        # Close the chunk collected so far (if any) under the current dst.
        if pending:
            res.append(Chunk(pending, dst))
            pending = []
        return pending

    res = []
    pending = []
    dst = None
    for line in block.split('\n'):
        if line == '':
            pending = flush(pending)
        elif line[0] == '*':
            # BUGFIX: flush the previous chunk BEFORE overwriting dst; the
            # original assigned the new header's dst first, so every chunk
            # was stored with the *next* chunk's dependency target.
            pending = flush(pending)
            dst = line.split(' ')[2].rstrip('D')
        else:
            surface, attr = line.split('\t')
            attr = attr.split(',')
            pending.append(Morph({
                'surface': surface,
                'base': attr[6],
                'pos': attr[0],
                'pos1': attr[1],
            }))
    pending = flush(pending)  # robustness: block without a trailing empty line
    for i, chunk in enumerate(res):
        # BUGFIX: '-1' marks the root.  The original indexed res[-1] with it,
        # wrongly adding every root chunk to the last chunk's srcs list.
        if int(chunk.dst) != -1:
            res[int(chunk.dst)].srcs.append(i)
    return res


def main():
    """Q41: for each chunk of sentence 7, print its surfaces, dst and srcs."""
    filename = 'ch05/ai.ja.txt.cabocha'
    with open(filename, mode='rt', encoding='utf-8') as f:
        blocks = f.read().split('EOS\n')
    blocks = [b for b in blocks if b != '']
    blocks = [parse_cabocha(block) for block in blocks]
    for chunk in blocks[7]:
        print([mo.surface for mo in chunk.morphs], chunk.dst, chunk.srcs)


if __name__ == '__main__':
    main()