[nlp]
lang = "en"
pipeline = ["llm"]

[components]

[components.llm]
factory = "llm"
save_io = true

[components.llm.task]
@llm_tasks = "spacy.TextCat.v3"
labels = ["DOCUMENTATION", "BUG"]
exclusive_classes = false

[components.llm.task.label_definitions]
DOCUMENTATION = "Issue about technical documentation"
BUG = "Issue about a fault in the software"

[components.llm.model]
@llm_models = "spacy.GPT-3-5.v1"
config = {"temperature": 0.3}

[components.llm.cache]
@llm_misc = "spacy.BatchCache.v1"
path = "/vol/prodigy_data/local-cache"
batch_size = 3
max_batches_in_mem = 10
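
To show how this config is consumed, here is a minimal sketch that assembles the pipeline with spacy-llm and classifies a single issue text. It assumes the config above is saved as config.cfg and that an OPENAI_API_KEY environment variable is available for the GPT-3.5 model; the sample text and printed scores are illustrative, not output from the source.

from spacy_llm.util import assemble

# Build the "llm" pipeline from the config above; prompts and responses
# are cached on disk under /vol/prodigy_data/local-cache per the BatchCache settings.
nlp = assemble("config.cfg")

# Run the text categorizer on an example issue title (hypothetical input).
doc = nlp("The installation guide is missing a step for Windows users.")

# The TextCat task writes label scores to doc.cats,
# e.g. {"DOCUMENTATION": 1.0, "BUG": 0.0} for a non-exclusive setup.
print(doc.cats)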