I'm working on a project and just saw this message in Xcode:
Copy of noncopyable typed value. This is a compiler bug. Please file a bug with a small example of the bug
It's pointing at this line:
await withTaskGroup(of: (Float,Float).self) { group in
but I'm not sure that line alone is meaningful enough for a report. I'll add the entire code block below, along with my first guess at a reduced example, but where would I start figuring out what to include in the report?
Is this really a compiler issue? I don't have a lot of confidence in my concurrency code.
Here's the chunk of code with the error in it:
internal func gradientDescent<I: SNLPIndex>(_ index: I?) async
{
    guard let hdEmbeddings else {
        fatalError("ldEmbeddings and hdEmbeddings must be initialized before gradient descent.")
    }

    let computeValueAndGradient = valueAndGrad(loss)
    //var bestLoss: Float = .greatestFiniteMagnitude
    let bestLoss = Mutex<Float>(.greatestFiniteMagnitude)
    var earlyStopCounter = 0
    let totalSize = ldEmbeddings!.shape[0]

    await withTaskGroup(of: (Float,Float).self) { group in
        var numActiveThreads = 0
        var completed = 0

        /// Create a separate batch for each thread we want to process concurrently
        /// - Create a set of indices randomly that we plan on using for our calculations
        /// - Create working copies of low- and high-dimensional embeddings
        /// - Then, each thread will:
        ///   - Calculate high-dimensional affinities
        ///   - Calculate the gradient
        ///   - Return the value and gradient norm so we can decide if we want to quit early
        for i in 0 ..< maxIterations {
            while numActiveThreads < numThreads {
                group.addTask(priority: .high) { [learningRate = learningRate, batchSize = batchSize, breakIn = breakIn, earlyExaggeration = earlyExaggeration, computeValueAndGradient] in
                    // Randomly shuffle indices to select a batch
                    let indices = MLXArray((0 ..< totalSize).shuffled().prefix(batchSize))

                    // Create mini-batches for ldEmbeddings and hdEmbeddings based on selected indices
                    let ldBatch = self.ldEmbeddings![indices]
                    let hdBatch = hdEmbeddings[indices]

                    if let index = index {
                    }

                    let hdAffinities = tSNE.computeHighDimAffinities(data: hdBatch)
                    if i < breakIn {
                        hdAffinities *= earlyExaggeration
                    }

                    // Compute the gradient for the current batch
                    let (value, gradient) = computeValueAndGradient([ldBatch, hdAffinities])

                    // Update our ldEmbeddings estimate for the next batch
                    // Make sure we don't do this at the same time as another worker task
                    self.ldEmbeddingsMutex.withLock { _ in
                        for (i, idx) in indices.enumerated() {
                            self.ldEmbeddings![idx] -= learningRate * gradient[0][i]
                        }
                    }

                    return (value[0].item(), norm(gradient[0]).item())
                }
                numActiveThreads += 1
                //print("New thread added: \(numActiveThreads)/\(numThreads) threads active")
            }

            // Wait until we have room for additional Tasks
            if let (value, gradientNorm) = await group.next() {
                numActiveThreads -= 1
                completed += 1

                // If our gradient norm is too small, stop
                if gradientNorm < minimumGradientNorm {
                    //print("Norm too small: \(completed)")
                    break
                }

                // If we're far enough into this process, monitor for whether we can exit early
                if completed > breakIn {
                    // Evaluate the loss value
                    bestLoss.withLock { bestLoss in
                        if value < bestLoss {
                            bestLoss = value
                            earlyStopCounter = 0
                        } else {
                            earlyStopCounter += 1
                        }
                    }
                    if earlyStopCounter >= patience {
                        //print("Early stop: \(completed)")
                        break
                    }
                }
            }
        }
    }
}
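For the report itself, my first instinct is to strip this down to the pieces the diagnostic seems to be about: a noncopyable value (the Mutex) captured by the withTaskGroup body closure. Something like the sketch below is where I'd start; the function name and the placeholder work inside the child task are made up, and I haven't confirmed that this reduced version still triggers the message.

import Synchronization

// Hypothetical reduced test case (not verified to reproduce the diagnostic):
// a noncopyable Mutex captured by the withTaskGroup body closure,
// mirroring how bestLoss is used above.
@available(macOS 15.0, iOS 18.0, *)
func reducedExample() async {
    let bestLoss = Mutex<Float>(.greatestFiniteMagnitude)

    await withTaskGroup(of: (Float, Float).self) { group in
        group.addTask(priority: .high) {
            // Stand-in for the real per-batch work
            return (Float.random(in: 0...1), Float.random(in: 0...1))
        }

        if let (value, _) = await group.next() {
            // Touch the captured noncopyable value, as the real code does
            bestLoss.withLock { best in
                if value < best { best = value }
            }
        }
    }
}

If that still shows the compiler-bug message, I'd file it with just that plus the Xcode/toolchain version; if it compiles cleanly, I'd add pieces back one at a time (the capture list, the second mutex, the MLX calls) until the message reappears.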