Skip to content

Commit 868e218

Browse files
Authored by ahoppen
Merge pull request #2303 from ahoppen/active-processor-count
Use `activeProcessorCount` instead of `processorCount` in short-lived use-cases
2 parents d404738 + 4fe68ee commit 868e218

File tree

7 files changed

+10
-15
lines changed

7 files changed

+10
-15
lines changed

Sources/CompletionScoring/Text/CandidateBatch.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -440,7 +440,7 @@ extension Pattern {
440440
compactScratchArea(capacity: Self.totalCandidates(batches: batches)) { matchesScratchArea in
441441
let scoringWorkloads = ScoringWorkload.workloads(
442442
for: batches,
443-
parallelism: ProcessInfo.processInfo.processorCount
443+
parallelism: ProcessInfo.processInfo.activeProcessorCount
444444
)
445445
// `nonisolated(unsafe)` is fine because every iteration accesses a distinct index of the buffer.
446446
nonisolated(unsafe) let matchesScratchArea = matchesScratchArea

Sources/CompletionScoring/Text/ScoredMatchSelector.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ package class ScoredMatchSelector {
2828
package init(batches: [CandidateBatch]) {
2929
let scoringWorkloads = ScoringWorkload.workloads(
3030
for: batches,
31-
parallelism: ProcessInfo.processInfo.processorCount
31+
parallelism: ProcessInfo.processInfo.activeProcessorCount
3232
)
3333
threadWorkloads = scoringWorkloads.map { scoringWorkload in
3434
ThreadWorkload(allBatches: batches, slices: scoringWorkload.slices)

Sources/CompletionScoring/Utilities/SwiftExtensions.swift

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -298,8 +298,8 @@ extension ContiguousZeroBasedIndexedCollection {
298298
// extra jobs should let the performance cores pull a disproportionate amount of work items. More fine
299299
// granularity also helps if the work items aren't all the same difficulty, for the same reason.
300300

301-
// Defensive against `processorCount` failing
302-
let sliceCount = Swift.min(Swift.max(ProcessInfo.processInfo.processorCount * 32, 1), count)
301+
// Defensive against `activeProcessorCount` failing
302+
let sliceCount = Swift.min(Swift.max(ProcessInfo.processInfo.activeProcessorCount * 32, 1), count)
303303
let count = self.count
304304
DispatchQueue.concurrentPerform(iterations: sliceCount) { sliceIndex in
305305
precondition(sliceCount >= 1)

Sources/SemanticIndex/TaskScheduler.swift

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -644,14 +644,6 @@ package actor TaskScheduler<TaskDescription: TaskDescriptionProtocol> {
644644
}
645645
}
646646

647-
extension TaskScheduler {
648-
package static var forTesting: TaskScheduler {
649-
return .init(maxConcurrentTasksByPriority: [
650-
(.low, ProcessInfo.processInfo.processorCount)
651-
])
652-
}
653-
}
654-
655647
// MARK: - Collection utilities
656648

657649
fileprivate extension Collection where Element: Comparable {

Sources/SourceKitLSP/SourceKitLSPServer.swift

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,9 @@ package actor SourceKitLSPServer {
138138
isIndexingPaused: Bool,
139139
options: SourceKitLSPOptions
140140
) -> [(priority: TaskPriority, maxConcurrentTasks: Int)] {
141+
// Use `processorCount` instead of `activeProcessorCount` here because `activeProcessorCount` may be decreased due
142+
// to thermal throttling. We don't want to consistently limit the concurrent indexing tasks if SourceKit-LSP was
143+
// launched during a period of thermal throttling.
141144
let processorCount = ProcessInfo.processInfo.processorCount
142145
let lowPriorityCores =
143146
if isIndexingPaused {

Sources/SourceKitLSP/SyntacticTestIndex.swift

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ actor SyntacticTestIndex {
110110
// in O(number of pending tasks), since we need to scan for dependency edges to add, which would make scanning files
111111
// be O(number of files).
112112
// Over-subscribe the processor count in case one batch finishes more quickly than another.
113-
let batches = testFiles.partition(intoNumberOfBatches: ProcessInfo.processInfo.processorCount * 4)
113+
let batches = testFiles.partition(intoNumberOfBatches: ProcessInfo.processInfo.activeProcessorCount * 4)
114114
await batches.concurrentForEach { filesInBatch in
115115
for uri in filesInBatch {
116116
await self.rescanFileAssumingOnQueue(uri)
@@ -194,7 +194,7 @@ actor SyntacticTestIndex {
194194
// in O(number of pending tasks), since we need to scan for dependency edges to add, which would make scanning files
195195
// be O(number of files).
196196
// Over-subscribe the processor count in case one batch finishes more quickly than another.
197-
let batches = uris.partition(intoNumberOfBatches: ProcessInfo.processInfo.processorCount * 4)
197+
let batches = uris.partition(intoNumberOfBatches: ProcessInfo.processInfo.activeProcessorCount * 4)
198198
for batch in batches {
199199
self.indexingQueue.async(priority: .low, metadata: .index(Set(batch))) {
200200
for uri in batch {

Sources/SwiftExtensions/AsyncUtils.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ package func withCancellableCheckedThrowingContinuation<Handle: Sendable, Result
135135
extension Collection where Self: Sendable, Element: Sendable {
136136
/// Transforms all elements in the collection concurrently and returns the transformed collection.
137137
package func concurrentMap<TransformedElement: Sendable>(
138-
maxConcurrentTasks: Int = ProcessInfo.processInfo.processorCount,
138+
maxConcurrentTasks: Int = ProcessInfo.processInfo.activeProcessorCount,
139139
_ transform: @escaping @Sendable (Element) async -> TransformedElement
140140
) async -> [TransformedElement] {
141141
let indexedResults = await withTaskGroup(of: (index: Int, element: TransformedElement).self) { taskGroup in

0 commit comments

Comments (0)