@@ -12,7 +12,6 @@ dataset:
   enabled: true
   n_samples: 50
   random_seed: 42
-
 
 # Post-processing Configuration
 post_processing:
@@ -30,18 +29,18 @@ post_processing:
 
 # Model Configuration
 model:
+  # Provider-agnostic model configuration
+  provider: "huggingface"  # Options: huggingface, openai
   # Model task type for Fairness Compass routing
   model_task: "binary_classification"  # Options: binary_classification, multiclass_classification, regression, generation, ranking
-  label_behavior: "binary"  # Options: binary, categorical, continuous
-
-  # Hugging Face model configuration
-  huggingface:
-    enabled: true
-    model_id: "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
-    device: "cuda"  # or "cpu" for CPU-only inference
-    max_new_tokens: 50
-    temperature: 0.7
-    top_p: 0.9
+  label_behavior: "binary"  # Options: binary, categorical, continuous
+  model_id: "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+  device: "cuda"  # or "cpu" for CPU-only inference
+  max_new_tokens: 50
+  temperature: 0.7
+  top_p: 0.9
+  # Optional for remote providers (OpenAI-compatible endpoints)
+  base_url: null
 
 # Prompting Configuration
 prompting:
@@ -72,5 +71,5 @@ metrics:
   - "precision"
 
 artifacts:
-  inference_results_path: "artifacts/cleaned_inference_results.csv"
-  postprocessed_results_path: "artifacts/postprocessed_results.csv"
+  inference_results_path: "artifacts/inference_results.csv"
+  postprocessed_results_path: "artifacts/postprocessed_results.csv"
0 commit comments