@@ -0,0 +1,24 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Threading;

namespace Microsoft.CodeAnalysis.Razor.Utilities;

internal sealed partial class MemoryCache<TKey, TValue>
where TKey : notnull
where TValue : class
{
private sealed class Entry(TValue value)
{
private long _lastAccessTicks = DateTime.UtcNow.Ticks;

public DateTime LastAccess => new(Volatile.Read(ref _lastAccessTicks));
public TValue Value => value;

public void UpdateLastAccess()
{
Volatile.Write(ref _lastAccessTicks, DateTime.UtcNow.Ticks);
}
}
}
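
The entry keeps its timestamp as a long of ticks rather than as a DateTime so it can be refreshed without locking: Volatile.Read and Volatile.Write have Int64 overloads that are atomic even on 32-bit runtimes, while DateTime has no Volatile overload. The standalone sketch below is not part of the diff (the LockFreeTimestamp name is made up for illustration); it shows the same ticks round-trip in isolation.

using System;
using System.Threading;

// Hypothetical type for illustration only.
internal sealed class LockFreeTimestamp
{
    private long _ticks = DateTime.UtcNow.Ticks;

    // Reconstructing from ticks yields DateTimeKind.Unspecified, which is fine here
    // because the value is only ever compared against timestamps built the same way.
    public DateTime Value => new(Volatile.Read(ref _ticks));

    public void Touch() => Volatile.Write(ref _ticks, DateTime.UtcNow.Ticks);
}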
@@ -0,0 +1,34 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;

namespace Microsoft.CodeAnalysis.Razor.Utilities;

internal partial class MemoryCache<TKey, TValue>
where TKey : notnull
where TValue : class
{
internal TestAccessor GetTestAccessor() => new(this);

internal struct TestAccessor(MemoryCache<TKey, TValue> instance)
{
public event Action Compacted
{
add => instance._compactedHandler += value;
remove => instance._compactedHandler -= value;
}

public readonly bool TryGetLastAccess(TKey key, out DateTime result)
{
if (instance._map.TryGetValue(key, out var value))
{
result = value.LastAccess;
return true;
}

result = default;
return false;
}
}
}
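
As a rough illustration of how a test might consume this accessor, the hypothetical test below fills a small cache past its limit and observes the compaction callback. The class, method, and key names are assumptions; GetTestAccessor, Compacted, and TryGetLastAccess come from the code above, and the test project is assumed to have InternalsVisibleTo access, as the existing tests do.

using Microsoft.CodeAnalysis.Razor.Utilities;
using Xunit;

public class MemoryCacheTestAccessorExample
{
    [Fact]
    public void CompactionIsObservableThroughTestAccessor()
    {
        // sizeLimit: 4 keeps the example small; the default is 50.
        var cache = new MemoryCache<string, object>(sizeLimit: 4);
        var accessor = cache.GetTestAccessor();

        var compacted = false;
        accessor.Compacted += () => compacted = true;

        // The fifth Set finds the cache at its limit, so it compacts before adding.
        for (var i = 0; i < 5; i++)
        {
            cache.Set($"key{i}", new object());
        }

        Assert.True(compacted);

        // key4 was added after compaction ran, so it must still be present.
        Assert.True(accessor.TryGetLastAccess("key4", out _));
    }
}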
@@ -5,77 +5,126 @@
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics.CodeAnalysis;
using System.Linq;

namespace Microsoft.CodeAnalysis.Razor.Utilities;

// We've created our own MemoryCache here, ideally we would use the one in Microsoft.Extensions.Caching.Memory,
// but until we update O# that causes an Assembly load problem.
internal class MemoryCache<TKey, TValue>
/// <summary>
/// A thread-safe, size-limited cache with approximate LRU (Least Recently Used)
/// eviction policy. When the cache reaches its size limit, it removes approximately
/// half of the least recently used entries.
/// </summary>
/// <typeparam name="TKey">The type of keys in the cache.</typeparam>
/// <typeparam name="TValue">The type of values in the cache.</typeparam>
/// <param name="sizeLimit">The maximum number of entries the cache can hold before compaction is triggered.</param>
/// <param name="concurrencyLevel">The estimated number of threads that will update the cache concurrently.</param>
internal sealed partial class MemoryCache<TKey, TValue>(int sizeLimit = 50, int concurrencyLevel = 2)
where TKey : notnull
where TValue : class
{
private const int DefaultSizeLimit = 50;
private const int DefaultConcurrencyLevel = 2;
private readonly ConcurrentDictionary<TKey, Entry> _map = new(concurrencyLevel, capacity: sizeLimit);

protected IDictionary<TKey, CacheEntry> _dict;
/// <summary>
/// Lock used to synchronize cache compaction operations. This prevents multiple threads
/// from attempting to compact the cache simultaneously while allowing concurrent reads.
/// </summary>
private readonly object _compactLock = new();
private readonly int _sizeLimit = sizeLimit;

private readonly object _compactLock;
private readonly int _sizeLimit;
/// <summary>
/// Optional callback invoked after cache compaction completes. Only used by tests.
/// </summary>
private Action? _compactedHandler;

public MemoryCache(int sizeLimit = DefaultSizeLimit, int concurrencyLevel = DefaultConcurrencyLevel)
/// <summary>
/// Attempts to retrieve a value from the cache and updates its last access time if found.
/// </summary>
public bool TryGetValue(TKey key, [NotNullWhen(true)] out TValue? result)
{
_sizeLimit = sizeLimit;
_dict = new ConcurrentDictionary<TKey, CacheEntry>(concurrencyLevel, capacity: _sizeLimit);
_compactLock = new object();
}

public bool TryGetValue(TKey key, [NotNullWhen(returnValue: true)] out TValue? result)
{
if (_dict.TryGetValue(key, out var value))
if (_map.TryGetValue(key, out var entry))
{
value.LastAccess = DateTime.UtcNow;
result = value.Value;
entry.UpdateLastAccess();
result = entry.Value;
return true;
}

result = default;
return false;
}

/// <summary>
/// Adds or updates a value in the cache. If the cache is at capacity, triggers compaction
/// before adding the new entry.
/// </summary>
public void Set(TKey key, TValue value)
{
CompactIfNeeded();

_map[key] = new Entry(value);
}

/// <summary>
/// Removes approximately half of the least recently used entries when the cache reaches capacity.
/// </summary>
private void CompactIfNeeded()
{
// Fast path: check size without locking
if (_map.Count < _sizeLimit)
{
return;
}

lock (_compactLock)
{
if (_dict.Count >= _sizeLimit)
// Double-check after acquiring lock in case another thread already compacted
if (_map.Count < _sizeLimit)
{
Compact();
return;
}
}

_dict[key] = new CacheEntry
{
LastAccess = DateTime.UtcNow,
Value = value,
};
}
// Snapshot each entry's key and last-access time so the least recently used entries
// can be identified for this round of approximate LRU eviction.
var orderedItems = _map.ToArray().SelectAndOrderByAsArray(
selector: static x => (x.Key, x.Value.LastAccess),
keySelector: static x => x.LastAccess);

public void Clear() => _dict.Clear();
var toRemove = Math.Max(_sizeLimit / 2, 1);

protected virtual void Compact()
{
var kvps = _dict.ToArray().OrderBy(x => x.Value.LastAccess).ToArray();
// Remove up to half of the oldest entries using an atomic remove-then-check pattern,
// so entries that were accessed after the snapshot was taken are restored rather than evicted.
foreach (var (itemKey, itemLastAccess) in orderedItems)
{
// Remove first, then check the access time: this avoids racing with readers that
// refresh an entry between the snapshot and its removal.
// Note: if TryRemove fails, another thread already removed this entry.
if (_map.TryRemove(itemKey, out var removedEntry))
{
if (removedEntry.LastAccess == itemLastAccess)
{
// Entry was still old when removed - successful eviction
toRemove--;

for (var i = 0; i < _sizeLimit / 2; i++)
{
_dict.Remove(kvps[i].Key);
// Stop early if we've removed enough entries
if (toRemove == 0)
{
break;
}
}
else
{
// Entry was accessed after the snapshot - try to restore it.
// If TryAdd fails, another thread has already stored a newer entry for this key,
// which is acceptable for a cache - the fresher value simply wins.
_map.TryAdd(itemKey, removedEntry);
}
}
}

_compactedHandler?.Invoke();
}
}

protected class CacheEntry
{
public required TValue Value { get; init; }

public required DateTime LastAccess { get; set; }
}
/// <summary>
/// Removes all entries from the cache.
/// </summary>
public void Clear()
=> _map.Clear();
}
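
For reference, here is a minimal usage sketch of the rewritten cache. It is not taken from the PR; the key and value strings and the small sizeLimit are made up for illustration, while Set, TryGetValue, and Clear are the public API shown above.

using System;
using Microsoft.CodeAnalysis.Razor.Utilities;

internal static class MemoryCacheUsageSketch
{
    public static void Run()
    {
        // A small limit makes compaction easy to trigger; real callers rely on the default of 50.
        var cache = new MemoryCache<string, string>(sizeLimit: 4);

        for (var i = 0; i < 4; i++)
        {
            cache.Set($"item{i}", $"value{i}");
        }

        // Reading an entry refreshes its last-access time, so it is kept over colder
        // entries when compaction runs.
        _ = cache.TryGetValue("item3", out _);

        // The cache is now at its size limit: this Set first evicts roughly the oldest
        // half of the entries, then stores the new value.
        cache.Set("item4", "value4");

        if (cache.TryGetValue("item4", out var value))
        {
            Console.WriteLine(value); // prints "value4"
        }

        cache.Clear();
    }
}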
@@ -0,0 +1,47 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Razor.Test.Common;
using Microsoft.CodeAnalysis.Razor.Utilities;
using Xunit;
using Xunit.Abstractions;

namespace Microsoft.CodeAnalysis.Razor.Workspaces.Test.Utilities;

[CollectionDefinition(nameof(MemoryCachePerfTest), DisableParallelization = true)]
[Collection(nameof(MemoryCachePerfTest))]
public class MemoryCachePerfTest(ITestOutputHelper testOutput) : ToolingTestBase(testOutput)
{
[Fact]
public async Task HighFrequencyAccess_MaintainsPerformance()
{
var cache = new MemoryCache<string, IReadOnlyList<int>>();
const string Key = "hot-key";
cache.Set(Key, [1, 2, 3]);

const int AccessCount = 10_000;
var stopwatch = Stopwatch.StartNew();

var tasks = Enumerable.Range(0, Environment.ProcessorCount)
.Select(x => Task.Run(() =>
{
for (var i = 0; i < AccessCount / Environment.ProcessorCount; i++)
{
_ = cache.TryGetValue(Key, out _);
}
}))
.ToArray();

await Task.WhenAll(tasks);
stopwatch.Stop();

// Should complete reasonably quickly (adjust threshold as needed)
Assert.True(stopwatch.ElapsedMilliseconds < 1000,
$"High-frequency access took too long: {stopwatch.ElapsedMilliseconds}ms");
}
}