Deletes the oldest values when the maximum size is exceeded
Tagless Final
Partitions entries by hashCode into multiple caches to avoid thread contention in some corner cases (see the sketch below)
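To make the partitioning point concrete, here is a minimal sketch of the idea only; it is not the library's implementation, and the names `PartitionedCache` and `of` are hypothetical. Keys are routed by `hashCode` into several independent `Ref`-backed maps, so updates that land on different partitions never contend on the same piece of state.

import cats.effect.{IO, Ref}
import cats.syntax.all._

// Illustrative sketch only: route each key by hashCode into one of several
// independent Ref-backed maps, so writers on different partitions do not
// contend on the same Ref.
final class PartitionedCache[K, V](partitions: Vector[Ref[IO, Map[K, V]]]) {

  private def partitionFor(key: K): Ref[IO, Map[K, V]] =
    partitions((key.hashCode & Int.MaxValue) % partitions.size)

  def put(key: K, value: V): IO[Unit] =
    partitionFor(key).update(_ + (key -> value))

  def get(key: K): IO[Option[V]] =
    partitionFor(key).get.map(_.get(key))
}

object PartitionedCache {

  // nrOfPartitions must be >= 1
  def of[K, V](nrOfPartitions: Int): IO[PartitionedCache[K, V]] =
    Vector
      .fill(nrOfPartitions)(Ref.of[IO, Map[K, V]](Map.empty))
      .sequence
      .map(refs => new PartitionedCache(refs))
}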
Cache.scala
trait Cache[F[_], K, V] {
  def get(key: K): F[Option[V]]
  def getOrElse(key: K, default: => F[V]): F[V]

  /** Does not run `value` concurrently for the same key */
  def getOrUpdate(key: K)(value: => F[V]): F[V]

  /** Does not run `value` concurrently for the same key.
    * Releasable.release will be called upon key removal from the cache. */
  def getOrUpdateReleasable(key: K)(value: => F[Releasable[F, V]]): F[V]

  /** @return previous value if any, possibly not yet loaded */
  def put(key: K, value: V): F[F[Option[V]]]
  def put(key: K, value: V, release: F[Unit]): F[F[Option[V]]]
  def size: F[Int]
  def keys: F[Set[K]]

  /** Might be an expensive call */
  def values: F[Map[K, F[V]]]

  /** @return previous value if any, possibly not yet loaded */
  def remove(key: K): F[F[Option[V]]]

  /** Removes loading values from the cache, however does not cancel them */
  def clear: F[F[Unit]]
}
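For instance, given a `cache` that has already been constructed by whatever factory the library provides (construction is out of scope here), `getOrUpdate` deduplicates concurrent loads of the same key. The `fetchUser` helper below is a hypothetical usage example, not part of the API; it relies only on the trait methods shown above.

import cats.effect.IO

def fetchUser(cache: Cache[IO, String, String], id: String): IO[String] =
  cache.getOrUpdate(id) {
    IO.delay {
      // stand-in for an expensive lookup (e.g. a database call);
      // getOrUpdate runs it at most once at a time per key, and
      // concurrent callers for the same key share the result
      s"user-$id"
    }
  }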
SerialMap.scala
trait SerialMap[F[_], K, V] {
  def get(key: K): F[Option[V]]
  def getOrElse(key: K, default: => F[V]): F[V]

  /** Does not run `value` concurrently for the same key */
  def getOrUpdate(key: K, value: => F[V]): F[V]
  def put(key: K, value: V): F[Option[V]]

  /** `f` will be run serially for the same key; the entry will be removed in case `f` returns `none` */
  def modify[A](key: K)(f: Option[V] => F[(Option[V], A)]): F[A]

  /** `f` will be run serially for the same key; the entry will be removed in case `f` returns `none` */
  def update(key: K)(f: Option[V] => F[Option[V]]): F[Unit]
  def size: F[Int]
  def keys: F[Set[K]]

  /** Might be an expensive call */
  def values: F[Map[K, V]]
  def remove(key: K): F[Option[V]]
  def clear: F[Unit]
}
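As a usage sketch (the `increment` helper and the `counters` parameter are hypothetical; only the trait methods above are used), `modify` makes a read-increment-write cycle safe per key, because invocations for the same key are serialized:

import cats.effect.IO

def increment(counters: SerialMap[IO, String, Int], key: String): IO[Int] =
  counters.modify(key) { current =>
    val next = current.getOrElse(0) + 1
    // returning Some keeps the entry; returning None would remove it
    IO.pure((Option(next), next))
  }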