
Commit 9461057

Updated @available for other platforms (#279)
1 parent 4870d18 commit 9461057

12 files changed (+31 −31 lines)
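
For context on what the change means: the trailing asterisk in `@available` covers unlisted platforms, but spelling out tvOS 18.0, visionOS 2.0, and watchOS 11.0 gives these declarations explicit minimum versions that match the OS releases introducing the underlying MLTensor API, and lets clients with older deployment targets guard call sites with `#available`. A minimal sketch of the pattern using a hypothetical type (not part of this package):

// Before: @available(macOS 15.0, iOS 18.0, *)
// After:
@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
struct HypotheticalFeature {            // illustrative type, not from this package
    func run() -> String { "running on a supported OS" }
}

// A caller that also targets older OS versions checks availability at runtime:
func useFeatureIfPossible() -> String {
    if #available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *) {
        return HypotheticalFeature().run()
    }
    return "feature requires a newer OS version"
}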

Sources/Generation/Decoders.swift

Lines changed: 2 additions & 2 deletions
@@ -3,7 +3,7 @@ import CoreML

 // MARK: Greedy Decoding

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 func selectNextTokenUsingGreedyDecoding(from scores: MLTensor) -> MLTensor {
     let indices = scores.argmax(alongAxis: -1).reshaped(to: [1, 1])
     // Ensure indices are Int32 for concatenation with input tokens

@@ -19,7 +19,7 @@ func selectNextTokenUsingGreedyDecoding(from scores: MLTensor) -> MLTensor {
 ///
 /// - Parameter scores: Processed logits tensor [batch_size, vocab_size]
 /// - Returns: Sampled token ID tensor [batch_size, 1]
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 func selectNextTokenUsingSampling(from scores: MLTensor) -> MLTensor {
     // Convert logits to probabilities
     let probs = scores.softmax(alongAxis: -1)
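
For readers unfamiliar with the two functions touched here: they implement greedy and sampled token selection over MLTensor. The plain-Swift sketch below mirrors that logic on a [Float] logits vector for illustration only; it is not the package's MLTensor implementation.

import Foundation

// Greedy: pick the index of the highest logit.
func greedyToken(from logits: [Float]) -> Int {
    logits.indices.max(by: { logits[$0] < logits[$1] }) ?? 0
}

// Sampling: convert logits to probabilities with softmax, then draw one index.
func sampledToken(from logits: [Float]) -> Int {
    let maxLogit = logits.max() ?? 0
    let exps = logits.map { exp(Double($0 - maxLogit)) }   // stabilized softmax numerators
    let total = exps.reduce(0, +)
    var r = Double.random(in: 0..<total)
    for (i, e) in exps.enumerated() {
        r -= e
        if r <= 0 { return i }
    }
    return logits.count - 1
}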

Sources/Generation/Generation.swift

Lines changed: 4 additions & 4 deletions
@@ -38,7 +38,7 @@ public typealias GenerationOutput = [Int]
 /// - Parameter tokens: Input token sequence
 /// - Parameter config: Generation configuration
 /// - Returns: Logits array for next token prediction
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public typealias NextTokenModel = (MLTensor, GenerationConfig) async -> MLTensor

 /// Callback for receiving generated tokens during streaming.

@@ -48,7 +48,7 @@ public typealias PredictionTokensCallback = (GenerationOutput) -> Void
 public typealias PredictionStringCallback = (String) -> Void

 /// Protocol for text generation implementations.
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public protocol Generation {
     /// Generates text from a prompt string.
     ///

@@ -62,7 +62,7 @@ public protocol Generation {
     func generate(config: GenerationConfig, prompt: String, model: NextTokenModel, tokenizer: Tokenizer, callback: PredictionStringCallback?) async -> String
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 extension Generation {
     public func generate(
         config: GenerationConfig,

@@ -162,7 +162,7 @@ extension Generation {
     }
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public extension Generation {
     /// Performs greedy or sampling-based text generation based on generation configuration.
     ///

Sources/Generation/LogitsWarper/LogitsProcessor.swift

Lines changed: 2 additions & 2 deletions
@@ -8,7 +8,7 @@ import CoreML
 /// such as temperature scaling, top-k/top-p filtering, and repetition penalties.
 ///
 /// Based on: https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public protocol LogitsProcessor {
     /// Processes logits for next token prediction.
     ///

@@ -28,7 +28,7 @@ public protocol LogitsProcessor {
 /// This class provides a convenient way to chain multiple logits processors together.
 /// Each processor is applied in order to the logits tensor, with the output of one
 /// processor becoming the input to the next.
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public struct LogitsProcessorList {
     public var processors: [any LogitsProcessor]
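
The doc comment above describes the chaining behaviour of LogitsProcessorList. Below is a simplified, hypothetical mirror of that idea over [Float]; the real protocol operates on MLTensor and its exact method signature is not shown in this diff.

// Hypothetical, simplified stand-ins for illustration only.
protocol SimpleLogitsProcessor {
    func process(_ logits: [Float]) -> [Float]
}

struct SimpleProcessorList {
    var processors: [any SimpleLogitsProcessor]

    // Apply each processor in order; the output of one becomes the input to the next.
    func callAsFunction(_ logits: [Float]) -> [Float] {
        processors.reduce(logits) { current, processor in
            processor.process(current)
        }
    }
}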

Sources/Generation/LogitsWarper/MinPLogitsWarper.swift

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ import CoreML
 ///
 /// Based on:
 /// - https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L460
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public struct MinPLogitsWarper: LogitsProcessor {
     public let minP: Float
     public let minTokensToKeep: Int
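
Background on the annotated type: min-p keeps tokens whose probability is at least minP times the most likely token's probability, subject to a minimum of minTokensToKeep. A hypothetical [Float] sketch of that rule, not the package's MLTensor code:

import Foundation

func minPFilter(_ logits: [Float], minP: Float, minTokensToKeep: Int, filterValue: Float = -.infinity) -> [Float] {
    let maxLogit = logits.max() ?? 0
    let exps = logits.map { exp(Double($0 - maxLogit)) }
    let total = exps.reduce(0, +)
    let probs = exps.map { $0 / total }
    // Threshold scales with the top token's probability.
    let threshold = Double(minP) * (probs.max() ?? 0)
    var keep = logits.indices.filter { probs[$0] >= threshold }
    if keep.count < minTokensToKeep {
        keep = Array(logits.indices.sorted { probs[$0] > probs[$1] }.prefix(minTokensToKeep))
    }
    let keepSet = Set(keep)
    return logits.indices.map { keepSet.contains($0) ? logits[$0] : filterValue }
}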

Sources/Generation/LogitsWarper/RepetitionPenaltyLogitsProcessor.swift

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ public enum LogitsProcessorError: Error {
 /// Based on:
 /// - https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L297
 /// - Paper: https://arxiv.org/abs/1909.05858
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public struct RepetitionPenaltyLogitsProcessor: LogitsProcessor {
     public let penalty: Float
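
Background on the annotated type: the CTRL-style penalty referenced above makes tokens that already appeared in the sequence less likely. A hypothetical [Float] sketch of that rule (the package's own implementation operates on MLTensor):

// Divide positive logits by the penalty and multiply negative ones, so
// previously generated tokens become less probable after softmax.
func applyRepetitionPenalty(_ logits: [Float], generatedTokens: [Int], penalty: Float) -> [Float] {
    var adjusted = logits
    for token in Set(generatedTokens) where adjusted.indices.contains(token) {
        let score = adjusted[token]
        adjusted[token] = score < 0 ? score * penalty : score / penalty
    }
    return adjusted
}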

Sources/Generation/LogitsWarper/TemperatureLogitsWarper.swift

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ import CoreML
 /// Often used together with `TopPLogitsWarper` and `TopKLogitsWarper`.
 ///
 /// Based on: https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L231
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public struct TemperatureLogitsWarper: LogitsProcessor {
     public let temperature: Float
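
Background: temperature warping divides the logits by the temperature before softmax, so values below 1 sharpen the distribution and values above 1 flatten it. A hypothetical [Float] sketch:

// Illustrative stand-in for the warper's core idea, not the MLTensor implementation.
func applyTemperature(_ logits: [Float], temperature: Float) -> [Float] {
    precondition(temperature > 0, "temperature must be positive")
    return logits.map { $0 / temperature }
}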

Sources/Generation/LogitsWarper/TopKLogitsWarper.swift

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@ import CoreML
 /// Pro tip: In practice, LLMs use top_k in the 5-50 range.
 ///
 /// Based on: https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L532
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public struct TopKLogitsWarper: LogitsProcessor {
     public let topK: Int
     public let filterValue: Float
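
Background: top-k keeps the k largest logits and pushes the rest to filterValue (typically negative infinity) so softmax ignores them. A hypothetical [Float] sketch:

func topKFilter(_ logits: [Float], topK: Int, filterValue: Float = -.infinity) -> [Float] {
    guard !logits.isEmpty else { return logits }
    let k = min(max(topK, 1), logits.count)
    // Ties at the threshold may keep a few extra entries; fine for a sketch.
    let threshold = logits.sorted(by: >)[k - 1]
    return logits.map { $0 >= threshold ? $0 : filterValue }
}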

Sources/Generation/LogitsWarper/TopPLogitsWarper.swift

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ import CoreML
 /// Based on:
 /// - https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L465
 /// - Paper: https://arxiv.org/abs/1904.09751 (Nucleus Sampling)
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public struct TopPLogitsWarper: LogitsProcessor {
     public let topP: Float
     public let filterValue: Float
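
Background: nucleus (top-p) filtering keeps the smallest set of tokens whose cumulative probability reaches topP and masks everything else. A hypothetical [Float] sketch, not the package's MLTensor code:

import Foundation

func topPFilter(_ logits: [Float], topP: Float, filterValue: Float = -.infinity) -> [Float] {
    let maxLogit = logits.max() ?? 0
    let exps = logits.map { exp(Double($0 - maxLogit)) }
    let total = exps.reduce(0, +)
    // Visit tokens from most to least probable, keeping at least one.
    let order = logits.indices.sorted { exps[$0] > exps[$1] }
    var kept = Set<Int>()
    var cumulative = 0.0
    for i in order {
        kept.insert(i)
        cumulative += exps[i] / total
        if cumulative >= Double(topP) { break }
    }
    return logits.indices.map { kept.contains($0) ? logits[$0] : filterValue }
}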

Sources/Models/LanguageModel.swift

Lines changed: 10 additions & 10 deletions
@@ -12,7 +12,7 @@ import Generation
 import Hub
 import Tokenizers

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 /// A high-level interface for language model inference using CoreML.
 ///
 /// `LanguageModel` provides a convenient way to load and interact with pre-trained

@@ -72,7 +72,7 @@ public class LanguageModel {
     }
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 private extension LanguageModel {
     static func contextRange(from model: MLModel) -> (min: Int, max: Int) {
         contextRange(from: model, inputKey: Keys.inputIds)

@@ -109,7 +109,7 @@ private extension LanguageModel {
     }
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 extension LanguageModel {
     struct Configurations {
         var modelConfig: Config

@@ -118,7 +118,7 @@ extension LanguageModel {
     }
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 extension LanguageModel {
     enum Keys {
         // Input keys

@@ -135,7 +135,7 @@ extension LanguageModel {
     }
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public extension LanguageModel {
     /// Loads a compiled CoreML model from disk.
     ///

@@ -155,7 +155,7 @@ public extension LanguageModel {
     }
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 extension LanguageModel {
     enum KVCacheAvailability {
         /// Language models that support KV cache via state. Implementation details for handling state

@@ -167,7 +167,7 @@ extension LanguageModel {
     }
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public extension LanguageModel {
     /// Metadata fields associated to the Core ML model.
     var metadata: [MLModelMetadataKey: Any] {

@@ -296,7 +296,7 @@ public extension LanguageModel {
 // MARK: - Configuration Properties

 /// Asynchronous properties that are downloaded from the Hugging Face Hub configuration.
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public extension LanguageModel {
     /// The model configuration dictionary.
     ///

@@ -402,7 +402,7 @@ public extension LanguageModel {

 // MARK: - TextGenerationModel Conformance

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 extension LanguageModel: TextGenerationModel {
     /// The default generation configuration for this model.
     ///

@@ -424,7 +424,7 @@ extension LanguageModel: TextGenerationModel {
 ///
 /// Maintains a KV Cache as sequence generation progresses,
 /// using stateful Core ML buffers to minimize latency.
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public class LanguageModelWithStatefulKVCache: LanguageModel {
     private enum Mode {
         case prefilling

Sources/Models/LanguageModelTypes.swift

Lines changed: 4 additions & 4 deletions
@@ -15,7 +15,7 @@ import Tokenizers
 ///
 /// This protocol establishes the fundamental requirements for any language model
 /// that can perform next-token prediction and text generation tasks.
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public protocol LanguageModelProtocol {
     /// The name or path of the model.
     ///

@@ -50,7 +50,7 @@ public protocol LanguageModelProtocol {
     func predictNextTokenScores(_ input: MLTensor, config: GenerationConfig) async -> MLTensor
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public extension LanguageModelProtocol {
     /// Function call syntax for next token prediction.
     ///

@@ -69,7 +69,7 @@ public extension LanguageModelProtocol {
 ///
 /// This protocol extends `LanguageModelProtocol` and `Generation` to provide
 /// high-level text generation functionality with configurable parameters.
-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public protocol TextGenerationModel: Generation, LanguageModelProtocol {
     /// The default generation configuration for this model.
     ///

@@ -92,7 +92,7 @@ public protocol TextGenerationModel: Generation, LanguageModelProtocol {
     ) async throws -> String
 }

-@available(macOS 15.0, iOS 18.0, *)
+@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
 public extension TextGenerationModel {
     /// Default implementation of text generation that uses the underlying generation framework.
     ///
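
The extension above documents "function call syntax for next token prediction". A plausible shape for that convenience is sketched below, assuming it simply forwards to predictNextTokenScores and that the package's modules (CoreML, Generation, and the module defining LanguageModelProtocol) are imported; this is a sketch, not the package source.

// Hypothetical sketch of the callAsFunction convenience, mirroring the documented intent.
@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, watchOS 11.0, *)
public extension LanguageModelProtocol {
    func callAsFunction(_ input: MLTensor, config: GenerationConfig) async -> MLTensor {
        await predictNextTokenScores(input, config: config)
    }
}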
