<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd" xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2009/04/01/roofline-an-insightful-visual-performance-model-for-multicore-architectures.html</loc>
<lastmod>2009-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2010/01/01/demystifying-gpu-microarchitecture-through-microbenchmarking.html</loc>
<lastmod>2010-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2013/12/01/playing-atari-with-deep-reinforcement-learning.html</loc>
<lastmod>2013-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2013/12/01/what-makes-good-data-for-alignment-a-comprehensive-study-of-automatic-data-selection-in-instruction-tuning.html</loc>
<lastmod>2013-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2014/11/01/introducing-data-center-fabric-the-next-generation-facebook-data-center-network.html</loc>
<lastmod>2014-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2015/07/01/massively-parallel-methods-for-deep-reinforcement-learning.html</loc>
<lastmod>2015-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2015/07/01/ucx-an-open-source-framework-for-hpc-network-apis-and-beyond.html</loc>
<lastmod>2015-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2015/08/01/jupiter-rising-a-decade-of-clos-topologies-and-centralized-control-in-googles-datacenter-network.html</loc>
<lastmod>2015-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2016/01/01/mastering-the-game-of-go-with-deep-neural-networks-and-tree-search.html</loc>
<lastmod>2016-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2016/01/01/single-pass-parallel-prefix-scan-with-decoupled-look-back.html</loc>
<lastmod>2016-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2016/04/01/optimizing-performance-of-recurrent-neural-networks-on-gpus.html</loc>
<lastmod>2016-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2016/06/01/design-guidelines-for-high-performance-rdma-systems.html</loc>
<lastmod>2016-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2017/04/01/locality-aware-cta-clustering-for-modern-gpus.html</loc>
<lastmod>2017-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2017/05/01/offloading-communication-control-logic-in-gpu-accelerated-applications.html</loc>
<lastmod>2017-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2017/08/01/proximal-policy-optimization-algorithms.html</loc>
<lastmod>2017-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2017/10/01/optimizing-cache-bypassing-and-warp-scheduling-for-gpus.html</loc>
<lastmod>2017-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2017/12/01/rllib-abstractions-for-distributed-reinforcement-learning.html</loc>
<lastmod>2017-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2018/03/01/improving-real-time-performance-with-cuda-persistent-threads-cuper-on-the-jetson-tx2.html</loc>
<lastmod>2018-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2018/04/01/gpudirect-async-exploring-gpu-synchronous-communication-techniques-for-infiniband-clusters.html</loc>
<lastmod>2018-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2018/06/01/pipedream-fast-and-efficient-pipeline-parallel-dnn-training.html</loc>
<lastmod>2018-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2018/06/01/tensor-comprehensions-framework-agnostic-high-performance-machine-learning-abstractions.html</loc>
<lastmod>2018-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2018/07/01/universal-transformers.html</loc>
<lastmod>2018-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2018/10/01/tvm-an-automated-end-to-end-optimizing-compiler-for-deep-learning.html</loc>
<lastmod>2018-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2018/11/01/blockwise-parallel-decoding-for-deep-autoregressive-models.html</loc>
<lastmod>2018-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2019/01/01/transformer-xl-attentive-language-models-beyond-a-fixed-length-context.html</loc>
<lastmod>2019-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2019/07/01/gpipe-easy-scaling-with-micro-batch-pipeline-parallelism.html</loc>
<lastmod>2019-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2019/10/01/netdimm-low-latency-near-memory-network-interface-architecture.html</loc>
<lastmod>2019-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2019/10/01/seed-rl-scalable-and-efficient-deep-rl-with-accelerated-central-inference.html</loc>
<lastmod>2019-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2019/10/01/transformers-state-of-the-art-natural-language-processing.html</loc>
<lastmod>2019-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2019/11/01/fast-transformer-decoding-one-write-head-is-all-you-need.html</loc>
<lastmod>2019-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2020/02/01/gpu-initiated-openshmem-correct-and-eicient-intra-kernel-networking-for-dgpus.html</loc>
<lastmod>2020-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2020/02/01/low-rank-bottleneck-in-multi-head-attention-models.html</loc>
<lastmod>2020-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2020/03/01/megatron-lm-training-multi-billion-parameter-language-models-using-model-parallelism.html</loc>
<lastmod>2020-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2020/03/01/zero-memory-optimizations-toward-training-trillion-parameter-models.html</loc>
<lastmod>2020-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/sparsity/2020/04/01/longformer-the-long-document-transformer.html</loc>
<lastmod>2020-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2020/06/01/gshard-scaling-giant-models-with-conditional-computation-and-automatic-sharding.html</loc>
<lastmod>2020-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2020/06/01/transformers-are-rnns-fast-autoregressive-transformers-with-linear-attention.html</loc>
<lastmod>2020-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2020/08/01/an-in-depth-analysis-of-the-slingshot-interconnect.html</loc>
<lastmod>2020-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2020/09/01/fusionstitching-boosting-memory-intensive-computations-for-deep-learning-workloads.html</loc>
<lastmod>2020-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2020/09/01/learning-to-summarize-from-human-feedback.html</loc>
<lastmod>2020-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/eval/2020/09/01/measuring-massive-multitask-language-understanding.html</loc>
<lastmod>2020-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2020/11/01/ansor-generating-high-performance-tensor-programs-for-deep-learning.html</loc>
<lastmod>2020-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2021/01/01/unit-unifying-tensorized-instruction-compilation.html</loc>
<lastmod>2021-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2021/01/01/zero-offload-democratizing-billion-scale-model-training.html</loc>
<lastmod>2021-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2021/02/01/c-for-metal-high-performance-simd-programming-on-intel-gpus.html</loc>
<lastmod>2021-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2021/02/01/checkfreq-frequent-fine-grained-dnn-checkpointing.html</loc>
<lastmod>2021-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2021/02/01/learning-associative-inference-using-fast-weight-memory.html</loc>
<lastmod>2021-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2021/02/01/mlir-scaling-compiler-infrastructure-for-domain-specific-computation.html</loc>
<lastmod>2021-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2021/02/01/progressive-raising-in-multi-level-ir.html</loc>
<lastmod>2021-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2021/04/01/roformer-enhanced-transformer-with-rotary-position-embedding.html</loc>
<lastmod>2021-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2021/04/01/zero-infinity-breaking-the-gpu-memory-wall-for-extreme-scale-deep-learning.html</loc>
<lastmod>2021-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2021/06/01/linear-transformers-are-secretly-fast-weight-programmers.html</loc>
<lastmod>2021-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2021/06/01/lora-low-rank-adaptation-of-large-language-models.html</loc>
<lastmod>2021-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2021/07/01/chimera-efficiently-training-large-scale-neural-networks-with-bidirectional-pipelines.html</loc>
<lastmod>2021-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2021/07/01/pet-optimizing-tensor-programs-with-partially-equivalent-transformations-and-automated-corrections.html</loc>
<lastmod>2021-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2021/08/01/dnnfusion-accelerating-deep-neural-networks-execution-with-advanced-operator-fusion.html</loc>
<lastmod>2021-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2021/08/01/efficient-large-scale-language-model-training-on-gpu-clusters-using-megatron-lm.html</loc>
<lastmod>2021-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2021/10/01/bolt-bridging-the-gap-between-auto-tuners-and-hardware-native-performance.html</loc>
<lastmod>2021-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2021/10/01/combining-recurrent-convolutional-and-continuous-time-models-with-linear-state-space-layers.html</loc>
<lastmod>2021-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2021/10/01/efficiently-modeling-long-sequences-with-structured-state-spaces.html</loc>
<lastmod>2021-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2021/12/01/glam-efficient-scaling-of-language-models-with-mixture-of-experts.html</loc>
<lastmod>2021-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2021/12/01/torchfx-practical-program-capture-and-transformation-for-deep-learning-in-python.html</loc>
<lastmod>2021-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2022/01/01/a-compiler-framework-for-optimizing-dynamic-parallelism-on-gpus.html</loc>
<lastmod>2022-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/framework/2022/01/01/campo-cost-aware-performance-optimization-for-mixed-precision-neural-network-training.html</loc>
<lastmod>2022-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2022/01/01/chain-of-thought-prompting-elicits-reasoning-in-large-language-models.html</loc>
<lastmod>2022-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2022/01/01/darm-control-flow-melding-for-simt-thread-divergence-reduction.html</loc>
<lastmod>2022-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2022/01/01/deepspeed-moe-advancing-mixture-of-experts-inference-and-training-to-power-next-generation-ai-scale.html</loc>
<lastmod>2022-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2022/01/01/nvidia-h100-tensor-core-gpu-architecture.html</loc>
<lastmod>2022-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2022/01/01/transformer-quality-in-linear-time.html</loc>
<lastmod>2022-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2022/02/01/astitch-enabling-a-new-multi-dimensional-optimization-space-for-memory-intensive-ml-training-and-inference-on-modern-simt-architectures.html</loc>
<lastmod>2022-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2022/02/01/doubling-all2all-performance-with-nvidia-collective-communication-library-212.html</loc>
<lastmod>2022-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2022/02/01/neoflow-a-flexible-framework-for-enabling-efficient-compilation-for-high-performance-dnn-training.html</loc>
<lastmod>2022-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2022/02/01/st-moe-designing-stable-and-transferable-sparse-expert-models.html</loc>
<lastmod>2022-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2022/03/01/deepnet-scaling-transformers-to-1000-layers.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2022/03/01/tensor-programs-v-tuning-large-neural-networks-via-zero-shot-hyperparameter-transfer.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2022/03/01/training-compute-optimal-large-language-models.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2022/03/01/training-language-models-to-follow-instructions-with-human-feedback.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2022/04/01/palm-scaling-language-modeling-with-pathways.html</loc>
<lastmod>2022-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2022/04/01/training-a-helpful-and-harmless-assistant-with-reinforcement-learning-from-human-feedback.html</loc>
<lastmod>2022-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2022/05/01/dietcode-automatic-optimization-for-dynamic-tensor-programs.html</loc>
<lastmod>2022-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/cpu/2022/05/01/everything-you-need-to-know-about-the-cpu-power-management.html</loc>
<lastmod>2022-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2022/05/01/fastermoe-modeling-and-optimizing-training-of-large-scale-dynamic-pre-trained-models.html</loc>
<lastmod>2022-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2022/05/01/pathways-asynchronous-distributed-dataflow-for-ml.html</loc>
<lastmod>2022-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/cpu/2022/05/01/understanding-bios-configuration-for-performance-tuning.html</loc>
<lastmod>2022-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2022/06/01/efficiently-emulating-high-bitwidth-computation-with-low-bitwidth-hardware.html</loc>
<lastmod>2022-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2022/06/01/gmi-drl-empowering-multi-gpu-deep-reinforcement-learning-with-gpu-spatial-multiplexing.html</loc>
<lastmod>2022-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2022/06/01/tutel-adaptive-mixture-of-experts-at-scale.html</loc>
<lastmod>2022-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2022/07/01/alpa-automating-inter-and-intra-operator-parallelism-for-distributed-deep-learning.html</loc>
<lastmod>2022-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2022/07/01/flashattention-fast-and-memory-efficient-exact-attention-with-io-awareness.html</loc>
<lastmod>2022-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2022/07/01/microsecond-scale-preemption-for-concurrent-gpu-accelerated-dnn-inferences.html</loc>
<lastmod>2022-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2022/07/01/orca-a-distributed-serving-system-for-transformer-based-generative-models.html</loc>
<lastmod>2022-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2022/07/01/reexamining-direct-cache-access-to-optimize-io-intensive-applications-for-multi-hundred-gigabit-networks.html</loc>
<lastmod>2022-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2022/07/01/roller-fast-and-efficient-tensor-compilation-for-deep-learning.html</loc>
<lastmod>2022-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2022/07/01/unity-accelerating-dnn-training-through-joint-optimization-of-algebraic-transformations-and-parallelization.html</loc>
<lastmod>2022-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2022/07/01/webshop-towards-scalable-real-world-web-interaction-with-grounded-language-agents.html</loc>
<lastmod>2022-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2022/08/01/fp8-quantization-the-power-of-the-exponent.html</loc>
<lastmod>2022-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2022/09/01/apollo-automatic-partition-based-operator-fusion-throughlayer-by-layer-optimization.html</loc>
<lastmod>2022-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2022/09/01/fp8-formats-for-deep-learning.html</loc>
<lastmod>2022-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2022/10/01/hidet-task-mapping-programming-paradigm-for-deep-learning-tensor-programs.html</loc>
<lastmod>2022-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2022/10/01/tensorir-an-abstraction-for-automatic-tensorized-program-optimization.html</loc>
<lastmod>2022-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2022/10/01/the-devil-in-linear-transformer.html</loc>
<lastmod>2022-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2022/11/01/efficiently-scaling-transformer-inference.html</loc>
<lastmod>2022-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2022/11/01/improving-network-performance-of-hpc-systems-using-nvidia-magnum-io-nvshmem-and-gpudirect-async.html</loc>
<lastmod>2022-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2022/11/01/on-optimizing-the-communication-of-model-parallelism.html</loc>
<lastmod>2022-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2022/11/01/smoothquant-accurate-and-efficient-post-training-quantization-for-large-language-models.html</loc>
<lastmod>2022-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2022/11/01/tlp-a-deep-learning-based-cost-model-for-tensor-program-tuning.html</loc>
<lastmod>2022-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2023/01/01/a-135-gbpsgbit-066-pjbit-stacked-embedded-dram-with-multilayer-arrays-by-fine-pitch-hybrid-bonding-and-mini-tsv.html</loc>
<lastmod>2023-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2023/01/01/onednn-graph-compiler-a-hybrid-approach-for-high-performance-deep-learning-compilation.html</loc>
<lastmod>2023-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2023/01/01/stream-k-work-centric-parallel-decomposition-for-dense-matrix-matrix-multiplication-on-the-gpu.html</loc>
<lastmod>2023-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2023/02/01/accelerating-large-language-model-decoding-with-speculative-sampling.html</loc>
<lastmod>2023-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2023/02/01/llama-open-and-efficient-foundation-language-models.html</loc>
<lastmod>2023-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/2023/02/01/to-pack-or-not-to-pack-a-generalized-packing-analysis-and-transformation.html</loc>
<lastmod>2023-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2023/02/01/toolformer-language-models-can-teach-themselves-to-use-tools.html</loc>
<lastmod>2023-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2023/03/01/deepspeed-chat-easy-fast-and-affordable-rlhf-training-of-chatgpt-like-models-at-all-scales.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2023/03/01/graphene-an-ir-for-optimized-tensor-computations-on-gpus.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2023/03/01/reac-t-synergizing-reasoning-and-acting-in-language-models.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2023/03/01/scaling-vision-language-models-with-sparse-mixture-of-experts.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2023/03/01/simplified-state-space-layers-for-sequence-modeling.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2023/04/01/hungry-hungry-hippos-towards-language-modeling-with-state-space-models.html</loc>
<lastmod>2023-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2023/04/01/pytorch-fsdp-experiences-on-scaling-fully-sharded-data-parallel.html</loc>
<lastmod>2023-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2023/04/01/stable-and-low-precision-training-for-large-scale-vision-language-models.html</loc>
<lastmod>2023-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2023/04/01/with-shared-microexponents-a-little-shifting-goes-a-long-way.html</loc>
<lastmod>2023-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2023/05/01/a-framework-for-fine-grained-synchronization-of-dependent-gpu-kernels.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2023/05/01/acrobat-optimizing-auto-batching-of-dynamic-deep-learning-at-compile-time.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2023/05/01/alcop-automatic-load-compute-pipelining-in-deep-learning-compiler-for-ai-gpus.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2023/05/01/autoscratch-ml-optimized-cache-management-for-inference-oriented-gpus.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2023/05/01/direct-preference-optimization-your-language-model-is-secretly-a-reward-model.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2023/05/01/fast-inference-from-transformers-via-speculative-decoding.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2023/05/01/hardware-compute-partitioning-on-nvidia-gpus.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2023/05/01/integer-or-floating-point-new-outlooks-for-low-bit-quantization-on-large-language-models.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/eval/2023/05/01/on-the-tool-manipulation-capability-of-open-source-large-language-models.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2023/05/01/sirius-harvesting-whole-program-optimization-opportunitiesfor-dnns.html</loc>
<lastmod>2023-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2023/06/01/awq-activation-aware-weight-quantization-for-on-device-llm-compression-and-acceleration.html</loc>
<lastmod>2023-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2023/06/01/fp8-versus-int8-for-efficient-deep-learning-inference.html</loc>
<lastmod>2023-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2023/07/01/cocktailer-analyzing-and-optimizing-dynamic-control-flow-in-deep-learning.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2023/07/01/effectively-scheduling-computational-graphs-of-deep-neural-networks-toward-their-domain-specific-accelerators.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2023/07/01/einnet-optimizing-tensor-programs-with-derivation-based-transformations.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2023/07/01/flashattention-2-faster-attention-with-better-parallelism-and-work-partitioning.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2023/07/01/llama-2-open-foundation-and-fine-tuned-chat-models.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2023/07/01/optimizing-dynamic-neural-networks-with-brainstorm.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2023/07/01/overview-of-and-motivation-for-the-forthcoming-ultra-ethernet-consortium-specification.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2023/07/01/powerfusion-a-tensor-compiler-with-explicit-data-movement-description-and-instruction-level-graph-ir.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2023/07/01/rail-only-a-low-cost-high-performance-network-for-training-llms-with-trillion-parameters.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2023/07/01/scaling-transnormer-to-175-billion-parameters.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2023/07/01/welder-scheduling-deep-learning-memory-access-via-tile-graph.html</loc>
<lastmod>2023-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2023/08/01/sarathi-efficient-llm-inference-by-piggybacking-decodes-with-chunked-prefills.html</loc>
<lastmod>2023-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2023/09/01/deepspeed-ulysses-system-optimizations-for-enabling-training-of-extreme-long-sequence-transformer-models.html</loc>
<lastmod>2023-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2023/09/01/efficient-memory-management-for-large-language-model-serving-with-pagedattention.html</loc>
<lastmod>2023-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2023/09/01/ocp-microscaling-formats-mx-specification.html</loc>
<lastmod>2023-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2023/09/01/tree-of-thoughts-deliberate-problem-solving-with-large-language-models.html</loc>
<lastmod>2023-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2023/10/01/cachegen-kv-cache-compression-and-streaming-for-fast-large-language-model-serving.html</loc>
<lastmod>2023-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2023/10/01/fault-tolerant-hybrid-parallel-training-at-scale-with-reliable-and-efficient-in-memory-checkpointing.html</loc>
<lastmod>2023-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2023/10/01/fireact-toward-language-agent-fine-tuning.html</loc>
<lastmod>2023-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2023/10/01/flash-decoding-for-long-context-inference.html</loc>
<lastmod>2023-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2023/10/01/flextrain-a-dynamic-training-framework-for-heterogeneous-devices-environments.html</loc>
<lastmod>2023-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2023/10/01/gemini-fast-failure-recovery-in-distributed-training-with-in-memory-checkpoints.html</loc>
<lastmod>2023-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2023/10/01/ring-attention-with-blockwise-transformers-for-near-infinite-context.html</loc>
<lastmod>2023-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2023/10/01/steerlm-attribute-conditioned-sft-as-an-user-steerable-alternative-to-rlhf.html</loc>
<lastmod>2023-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2023/10/01/tackling-the-matrix-multiplication-micro-kernel-generation-with-exo.html</loc>
<lastmod>2023-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/eval/2023/11/01/gaia-a-benchmark-for-general-ai-assistants.html</loc>
<lastmod>2023-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/eval/2023/11/01/instruction-following-evaluation-for-large-language-models.html</loc>
<lastmod>2023-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2023/11/01/striped-attention-faster-ring-attention-for-causal-transformers.html</loc>
<lastmod>2023-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2023/11/01/zero-bubble-pipeline-parallelism.html</loc>
<lastmod>2023-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2023/12/01/experiences-building-an-mlir-based-sycl-compiler.html</loc>
<lastmod>2023-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2023/12/01/gated-linear-attention-transformers-with-hardware-efficient-training.html</loc>
<lastmod>2023-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2023/12/01/gqa-training-generalized-multi-query-transformer-models-from-multi-head-checkpoints.html</loc>
<lastmod>2023-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2023/12/01/jitspmm-just-in-time-instruction-generation-for-accelerated-sparse-matrix-matrix-multiplication.html</loc>
<lastmod>2023-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2023/12/01/mamba-linear-time-sequence-modeling-with-selective-state-spaces.html</loc>
<lastmod>2023-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2023/12/01/retrieval-augmented-generation-for-large-language-models-a-survey.html</loc>
<lastmod>2023-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2023/12/01/superserve-fine-grained-inference-serving-for-unpredictable-workloads.html</loc>
<lastmod>2023-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2024/01/01/deepseek-coder-when-the-large-language-model-meets-programming-the-rise-of-code-intelligence.html</loc>
<lastmod>2024-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2024/01/01/deepseekmoe-towards-ultimate-expert-specialization-in-mixture-of-experts-language-models.html</loc>
<lastmod>2024-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/01/01/distserve-disaggregating-prefill-and-decoding-for-goodput-optimized-large-language-model-serving.html</loc>
<lastmod>2024-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2024/01/01/eagle-speculative-sampling-requires-rethinking-feature-uncertainty.html</loc>
<lastmod>2024-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2024/01/01/fp6-llm-efficiently-serving-large-language-models-through-fp6-centric-algorithm-system-co-design.html</loc>
<lastmod>2024-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2024/01/01/lightning-attention-2-a-free-lunch-for-handling-unlimited-sequence-lengths-in-large-language-models.html</loc>
<lastmod>2024-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2024/01/01/optimal-kernel-orchestration-for-tensor-programs-with-korch.html</loc>
<lastmod>2024-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/2024/01/01/polytops-reconfigurable-and-flexible-polyhedral-scheduler.html</loc>
<lastmod>2024-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2024/01/01/self-play-fine-tuning-converts-weak-language-models-to-strong-language-models.html</loc>
<lastmod>2024-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2024/02/01/deepseekmath-pushing-the-limits-of-mathematical-reasoning-in-open-language-models.html</loc>
<lastmod>2024-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2024/02/01/massive-activations-in-large-language-models.html</loc>
<lastmod>2024-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/02/01/megascale-scaling-large-language-model-training-to-more-than-10000-gpus.html</loc>
<lastmod>2024-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2024/02/01/moe-mamba-efficient-selective-state-space-models-with-mixture-of-experts.html</loc>
<lastmod>2024-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2024/02/01/sod2-statically-optimizing-dynamic-deep-neural-network-execution.html</loc>
<lastmod>2024-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2024/03/01/deft-decoding-with-flash-tree-attention-for-efficient-tree-structured-llm-inference.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/2024/03/01/depyf-open-the-opaque-box-of-pytorch-compiler-for-machine-learning-researchers.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2024/03/01/gemini-15-unlocking-multimodal-understanding-across-millions-of-tokens-of-context.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2024/03/01/jamba-a-hybrid-transformer-mamba-language-model.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2024/03/01/scaling-up-test-time-compute-with-latent-reasoning-a-recurrent-depth-approach.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2024/03/01/scattered-mixture-of-experts-implementation.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2024/03/01/wasp-exploiting-gpu-pipeline-parallelism-with-hardware-accelerated-automatic-warp-specialization.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2024/04/01/better-faster-large-language-models-via-multi-token-prediction.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2024/04/01/felix-optimizing-tensor-programs-with-gradient-descent.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2024/04/01/hydride-a-retargetable-and-extensible-synthesis-based-compiler-for-modern-hardware-architectures.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2024/04/01/leave-no-context-behind-efficient-infinite-context-transformers-with-infini-attention.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2024/04/01/linear-attention-sequence-parallelism.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2024/04/01/magis-memory-optimization-via-coordinated-graph-transformation-and-scheduling-for-dnn.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2024/04/01/optimizing-deep-learning-inference-via-global-analysis-and-tensor-expressions.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2024/04/01/optimizing-dynamic-shape-neural-networks-on-accelerators-via-on-the-fly-micro-kernel-polymerization.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2024/04/01/prompt-cache-modular-attention-reuse-for-low-latency-inference.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/04/01/proteus-a-high-throughput-inference-serving-system-with-accuracy-scaling.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2024/04/01/pytorch-2-faster-machine-learning-through-dynamic-python-bytecode-transformation-and-graph-compilation.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2024/04/01/scaling-up-memory-disaggregated-applications-with-smart.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2024/04/01/shortcut-connected-expert-parallelism-for-accelerating-mixture-of-experts.html</loc>
<lastmod>2024-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2024/05/01/cacheblend-fast-large-language-model-serving-for-rag-with-cached-knowledge-fusion.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/05/01/efficient-heterogeneous-large-language-model-decoding-with-model-attention-disaggregation.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/eval/2024/05/01/livecodebench-holistic-and-contamination-free-evaluation-of-large-language-models-for-code.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2024/05/01/memoe-enhancing-model-editing-with-mixture-of-experts-adaptors.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2024/05/01/nemo-aligner-scalable-toolkit-for-efficient-model-alignment.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2024/05/01/openrlhf-an-easy-to-use-scalable-and-high-performance-rlhf-framework.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/05/01/pipeline-parallelism-with-controllable-memory.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/05/01/preble-efficient-distributed-prompt-scheduling-for-llm-serving.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/05/01/splitwise-efficient-generative-llm-inference-using-phase-splitting.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2024/05/01/stacking-your-transformers-a-closer-look-at-model-growth-for-efficient-llm-pre-training.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2024/05/01/transformers-are-ssms-generalized-models-and-efficient-algorithms-through-structured-state-space-duality.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2024/05/01/various-lengths-constant-speed-efficient-language-modeling-with-lightning-attention.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2024/05/01/you-only-cache-once-decoder-decoder-architectures-for-language-models.html</loc>
<lastmod>2024-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2024/06/01/deepseek-v2-a-strong-economical-and-efficient-mixture-of-experts-language-model.html</loc>
<lastmod>2024-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2024/06/01/eagle-2-faster-inference-of-language-models-with-dynamic-draft-trees.html</loc>
<lastmod>2024-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2024/06/01/medusa-simple-llm-inference-acceleration-framework-with-multiple-decoding-heads.html</loc>
<lastmod>2024-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2024/06/01/mind-the-gap-attainable-data-movement-and-operational-intensity-bounds-for-tensor-algorithms.html</loc>
<lastmod>2024-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/06/01/protrain-efficient-llm-training-via-adaptive-memory-management.html</loc>
<lastmod>2024-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/06/01/universal-checkpointing-a-flexible-and-efficient-distributed-checkpointing-system-for-large-scale-dnn-training-with-reconfigurable-parallelism.html</loc>
<lastmod>2024-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/07/01/accelerating-the-training-of-large-language-models-using-efficient-activation-rematerialization-and-optimal-hybrid-parallelism.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2024/07/01/cost-efficient-large-language-model-serving-for-multi-turn-conversations-with-cachedattention.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/07/01/efficient-training-of-large-language-models-on-distributed-infrastructures-a-survey.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2024/07/01/enabling-tensor-language-model-to-assist-in-generating-high-performance-tensor-programs-for-deep-learning.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2024/07/01/flashattention-3-fast-and-accurate-attention-with-asynchrony-and-low-precision.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2024/07/01/helpsteer2-open-source-dataset-for-training-top-performing-reward-models.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2024/07/01/ladder-enabling-efficient-low-precision-deep-learning-computing-through-hardware-aware-tensor-transformation.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2024/07/01/magpy-compiling-eager-mode-dnn-programs-by-monitoring-execution-states.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/sparsity/2024/07/01/minference-10-accelerating-pre-filling-for-long-context-llms-via-dynamic-sparse-attention.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/07/01/mooncake-a-kvcache-centric-disaggregated-architecture-for-llm-serving.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2024/07/01/nvidia-blackwell-architecture-technical-brief.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2024/07/01/scaling-laws-with-vocabulary-larger-models-deserve-larger-vocabularies.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/07/01/sglang-efficient-execution-of-structured-language-model-programs.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2024/07/01/the-llama-3-herd-of-models.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2024/07/01/usp-a-unified-sequence-parallelism-approach-for-long-context-generative-ai.html</loc>
<lastmod>2024-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2024/08/01/auxiliary-loss-free-load-balancing-strategy-for-mixture-of-experts.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/08/01/disttrain-addressing-model-and-data-heterogeneity-with-disaggregated-training-for-multimodal-large-language-models.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2024/08/01/fusechat-knowledge-fusion-of-chat-models.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2024/08/01/inference-scaling-laws-an-empirical-analysis-of-compute-optimal-inference-for-llm-problem-solving.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2024/08/01/lut-tensor-core-a-software-hardware-co-design-for-lut-based-low-bit-llm-inference.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2024/08/01/magicdec-breaking-the-latency-throughput-tradeoff-for-long-context-generation-with-speculative-decoding.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2024/08/01/marlin-mixed-precision-auto-regressive-parallel-inference-on-large-language-models.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/08/01/nanoflow-towards-optimal-large-language-model-serving-throughput.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/diffusions/dllm/2024/08/01/pipefusion-patch-level-pipeline-parallelism-for-diffusion-transformers-inference.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2024/08/01/scaling-llm-test-time-compute-optimally-can-be-more-effective-than-scaling-model-parameters.html</loc>
<lastmod>2024-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/09/01/domino-eliminating-communication-in-llm-training-via-generic-tensor-slicing-and-overlapping.html</loc>
<lastmod>2024-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/09/01/hexiscale-accommodating-large-language-model-training-over-heterogeneous-environment.html</loc>
<lastmod>2024-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2024/09/01/large-language-model-based-agents-for-software-engineering-a-survey.html</loc>
<lastmod>2024-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/nccl/2024/09/01/memory-efficiency-faster-initialization-and-cost-estimation-with-nvidia-collective-communications-library-222.html</loc>
<lastmod>2024-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/09/01/mnemosyne-parallelization-strategies-for-efficiently-serving-multi-million-context-length-llm-inference-requests-without-approximations.html</loc>
<lastmod>2024-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2024/09/01/prescount-effective-register-allocation-for-bank-conflict-reduction.html</loc>
<lastmod>2024-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2024/09/01/retargeting-and-respecializing-gpu-workloads-for-performance-portability.html</loc>
<lastmod>2024-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2024/09/01/rlhfuse-efficient-rlhf-training-for-large-language-models-with-inter-and-intra-stage-fusion.html</loc>
<lastmod>2024-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/eval/2024/09/01/swe-bench-can-language-models-resolve-real-world-github-issues.html</loc>
<lastmod>2024-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2024/10/01/do-large-language-models-need-a-content-delivery-network.html</loc>
<lastmod>2024-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2024/10/01/duoattention-efficient-long-context-llm-inference-with-retrieval-and-streaming-heads.html</loc>
<lastmod>2024-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2024/10/01/eps-moe-expert-pipeline-scheduler-for-cost-efficient-moe-inference.html</loc>
<lastmod>2024-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2024/10/01/flux-fast-software-based-communication-overlap-on-gpus-through-kernel-fusion.html</loc>
<lastmod>2024-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2024/10/01/hybridflow-a-flexible-and-efficient-rlhf-framework.html</loc>
<lastmod>2024-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2024/10/01/moe-accelerating-mixture-of-experts-methods-with-zero-computation-experts.html</loc>
<lastmod>2024-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2024/10/01/sageattention-accurate-8-bit-attention-for-plug-and-play-inference-acceleration.html</loc>
<lastmod>2024-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2024/11/01/microscopiq-accelerating-foundational-models-through-outlier-aware-microscaling-quantization.html</loc>
<lastmod>2024-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2024/11/01/minder-faulty-machine-detection-for-large-scale-distributed-model-training.html</loc>
<lastmod>2024-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2024/11/01/sageattention2-efficient-attention-with-thorough-outlier-smoothing-and-per-thread-int4-quantization.html</loc>
<lastmod>2024-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2024/11/01/uncovering-real-gpu-noc-characteristics-implications-on-interconnect-architecture.html</loc>
<lastmod>2024-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2024/12/01/batchllm-optimizing-large-batched-llm-inference-with-global-prefix-sharing-and-throughput-oriented-token-batching.html</loc>
<lastmod>2024-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2024/12/01/deepseek-v3-technical-report.html</loc>
<lastmod>2024-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2024/12/01/flex-attention-a-programming-model-for-generating-optimized-attention-kernels.html</loc>
<lastmod>2024-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2024/12/01/gated-delta-networks-improving-mamba2-with-delta-rule.html</loc>
<lastmod>2024-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2024/12/01/mixllm-llm-quantization-with-global-mixed-precision-between-output-features-and-highly-efficient-system-design.html</loc>
<lastmod>2024-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2024/12/01/unveiling-the-secret-recipe-a-guide-for-supervised-fine-tuning-small-llms.html</loc>
<lastmod>2024-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/01/01/accelerating-design-space-exploration-for-llm-training-systems-with-multi-experiment-parallel-simulation.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/01/01/autoccl-automated-collective-communication-tuning-for-accelerating-distributed-and-parallel-dnn-training.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/01/01/decdec-a-systems-approach-to-advancing-low-bit-llm-quantization.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/01/01/deft-decoding-with-flash-tree-attention-for-efficient-tree-structured-llm-inference.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/01/01/dissecting-and-modeling-the-architecture-of-modern-gpu-cores.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/01/01/enabling-efficient-gpu-communication-over-multiple-nics-with-fuselink.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/01/01/flexpipe-maximizing-training-efficiency-for-transformer-based-models-with-variable-length-inputs.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/01/01/jenga-enhancing-llm-long-context-fine-tuning-with-contextual-token-sparsity.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/01/01/kimi-k15-scaling-reinforcement-learning-with-llms.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2025/01/01/minimax-01-scaling-foundation-models-with-lightning-attention.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/nccl/2025/01/01/new-scaling-algorithm-and-initialization-with-nvidia-collective-communications-library-223.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/01/01/nvidia-blackwell.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/01/01/nvidia-dgx-b300.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/01/01/nvidia-rtx-blackwell-gpu-architecture.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/01/01/obscura-concealing-recomputation-overhead-in-training-of-large-language-models-with-bubble-filling-pipeline-transformation.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2025/01/01/pipethreader-software-defined-pipelining-for-efficient-dnn-execution.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/01/01/popfetcher-towards-accelerated-mixture-of-experts-training-via-popularity-based-expert-wise-prefetch.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/system/2025/01/01/principles-and-methodologies-for-serial-performance-optimization.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/01/01/qfactory-accelerating-quantized-large-language-model-serving-with-qtile-graphs.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/01/01/weaver-efficient-multi-llm-serving-with-attention-offloading.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/01/01/zen-empowering-distributed-training-with-sparsity-driven-data-synchronization.html</loc>
<lastmod>2025-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/02/01/bytescale-efficient-scaling-of-llm-training-with-a-2048k-context-length-on-more-than-12000-gpus.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/02/01/dreamddp-accelerating-data-parallel-distributed-llm-training-with-layer-wise-scheduled-partial-synchronization.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/02/01/easyspec-layer-parallel-speculative-decoding-for-efficient-multi-gpu-utilization.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/sparsity/2025/02/01/flexprefill-a-context-aware-sparse-attention-mechanism-for-efficient-long-sequence-inference.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/02/01/fmoe-fine-grained-expert-offloading-for-large-mixture-of-experts-serving.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/02/01/kvlink-accelerating-large-language-models-via-efficient-kv-cache-reuse.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/02/01/lasp-2-rethinking-sequence-parallelism-for-linear-attention-and-its-hybrid.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/02/01/mario-near-zero-cost-activation-checkpointing-in-pipeline-parallelism.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/sparsity/2025/02/01/moba-mixture-of-block-attention-for-long-context-llms.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/sparsity/2025/02/01/native-sparse-attention-hardware-aligned-and-natively-trainable-sparse-attention.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2025/02/01/reasoning-with-latent-thoughts-on-the-power-of-looped-transformers.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2025/02/01/scaling-up-muon-for-large-scale-language-model-training.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/02/01/training-llms-with-mxfp4.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/02/01/tree-attention-topology-aware-decoding-for-long-context-attention-on-gpu-clusters.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/02/01/twilight-adaptive-attention-sparsity-with-hierarchical-top-p-pruning.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/02/01/weipipe-weight-pipeline-parallelism-for-communication-effective-long-context-large-model-training.html</loc>
<lastmod>2025-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/03/01/communication-efficient-language-model-training-scales-reliably-and-robustly-scaling-laws-for-diloco.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/03/01/dissecting-and-modeling-the-architecture-of-modern-gpu-cores.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/03/01/eagle-3-scaling-up-inference-acceleration-of-large-language-models-via-training-time-test.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/nccl/2025/03/01/networking-reliability-and-observability-at-scale-with-nccl-224.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/03/01/neutrino-fine-grained-gpu-kernel-profiling-via-programmable-probing.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/03/01/numerical-error-analysis-of-large-language-models.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/03/01/oaken-fast-and-efficient-llm-serving-with-online-offline-hybrid-kv-cache-quantization.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/03/01/r1-searcher-incentivizing-the-search-capability-in-llms-via-reinforcement-learning.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2025/03/01/samplemix-a-sample-wise-pre-training-data-mixing-strategey-by-coordinating-data-quality-and-diversity.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/03/01/search-r1-training-llms-to-reason-and-leverage-search-engines-with-reinforcement-learning.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2025/03/01/tapered-off-policy-reinforce-stable-and-efficient-reinforcement-learning-for-llms.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/03/01/tiled-flash-linear-attention-more-efficient-linear-rnn-and-xlstm-kernels.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/03/01/ub-mesh-a-hierarchically-localized-nd-fullmesh-datacenter-network-architecture.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/03/01/understanding-stragglers-in-large-model-training-using-what-if-analysis.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/03/01/wlb-llm-workload-balanced-4d-parallelism-for-large-language-model-training.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/sparsity/2025/03/01/xattention-block-sparse-attention-with-antidiagonal-scoring.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/2025/04/01/a-little-goes-a-long-way-efficient-long-context-training-and-inference-with-partial-contexts.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/pruning/2025/04/01/beware-of-calibration-data-for-pruning-large-language-models.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/04/01/block-verification-accelerates-speculative-decoding.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/04/01/bytecheckpoint-a-unified-checkpointing-system-for-large-foundation-model-development.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/04/01/cbq-cross-block-quantization-for-large-language-models.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/04/01/deepcoder-a-fully-open-source-14b-coder-at-o3-mini-level.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/04/01/distributed-speculative-inference-dsi-speculation-parallelism-for-provably-faster-lossless-language-model-inference.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/04/01/effective-interplay-between-sparsity-and-quantization-from-theory-to-practice.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2025/04/01/exploring-data-scaling-trends-and-effects-in-reinforcement-learning-from-human-feedback.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/04/01/faster-cascades-via-speculative-decoding.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/04/01/fiddler-cpu-gpu-orchestration-for-fast-inference-of-mixture-of-experts-models.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/04/01/flashmask-efficient-and-rich-mask-extension-of-flashattention.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/04/01/helios-adaptive-model-and-early-exit-selection-for-efficient-llm-inference-serving.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2025/04/01/how-does-critical-batch-size-scale-in-pretraining.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2025/04/01/hyper-connections.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/04/01/introducing-ualink-200g-10-specification.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/04/01/long-context-compression-with-activation-beacon.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/04/01/mem0-building-production-ready-ai-agents-with-scalable-long-term-memory.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/04/01/mixture-of-attentions-for-speculative-decoding.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/04/01/multi-draft-speculative-sampling-canonical-decomposition-and-theoretical-limits.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/04/01/nemotron-h-a-family-of-accurate-and-efficient-hybrid-mamba-transformer-models.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/04/01/netmoe-accelerating-moe-training-through-dynamic-sample-placement.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/04/01/not-all-heads-matter-a-head-level-kv-cache-compression-method-with-integrated-retrieval-and-reasoning.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/04/01/omnikv-dynamic-context-selection-for-efficient-long-context-llms.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/04/01/ozaki-scheme-ii-a-gemm-oriented-emulation-of-floating-point-matrix-multiplication-using-an-integer-modular-technique.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/pruning/2025/04/01/probe-pruning-accelerating-llms-through-dynamic-pruning-via-model-probing.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/04/01/progressive-mixed-precision-decoding-for-efficient-llm-inference.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/04/01/razorattention-efficient-kv-cache-compression-through-retrieval-heads.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/04/01/retool-reinforcement-learning-for-strategic-tool-use-in-llms.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/04/01/scaling-fp8-training-to-trillion-token-llms.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/04/01/scaling-laws-for-precision.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/04/01/scbench-a-kv-cache-centric-analysis-of-long-context-methods.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/04/01/seed15-thinking-advancing-superb-reasoning-models-with-reinforcement-learning.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/04/01/simai-unifying-architecture-design-and-performance-tuning-for-large-scale-large-language-model-training-with-scalability-and-precision.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/04/01/squeezeattention-2d-management-of-kvcache-in-llm-inference-via-layer-wise-optimal-budget.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/04/01/streamrl-scalable-heterogeneous-and-elastic-rl-for-llms-with-disaggregated-stream-generation.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/04/01/swift-on-the-fly-self-speculative-decoding-for-llm-inference-acceleration.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2025/04/01/thunderkittens-simple-fast-and-adorable-kernels.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2025/04/01/tilelang-a-composable-tiled-programming-model-for-ai-systems.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2025/04/01/tilelink-generating-efficient-compute-communication-overlapping-kernels-using-tile-centric-primitives.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/04/01/tilus-a-virtual-machine-for-arbitrary-low-precision-gpgpu-computation-in-llm-serving.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/04/01/torchtitan-one-stop-pytorch-native-solution-for-production-ready-llm-pretraining.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/04/01/towards-optimal-multi-draft-speculative-decoding.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/04/01/turboquant-online-vector-quantization-with-near-optimal-distortion-rate.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/04/01/vl-cache-sparsity-and-modality-aware-kv-cache-compression-for-vision-language-model-inference-acceleration.html</loc>
<lastmod>2025-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/05/01/a-survey-on-test-time-scaling-in-large-language-models-what-how-where-and-how-well.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/diffusions/dllm/2025/05/01/accelerating-diffusion-llms-via-adaptive-parallel-decoding.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/05/01/areal-a-large-scale-asynchronous-reinforcement-learning-system-for-language-reasoning.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2025/05/01/dapo-an-open-source-llm-reinforcement-learning-system-at-scale.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/05/01/ecco-improving-memory-bandwidth-and-capacity-for-llms-via-entropy-aware-cache-compression.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/05/01/flashmla-etap-efficient-transpose-attention-pipeline-for-accelerating-mla-inference-on-nvidia-h20-gpus.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2025/05/01/flashtensor-optimizing-tensor-programs-by-leveraging-fine-grained-tensor-property.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/05/01/gllm-global-balanced-pipeline-parallelism-system-for-distributed-llm-serving-with-token-throttling.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/05/01/insights-into-deepseek-v3-scaling-challenges-and-reflections-on-hardware-for-ai-architectures.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2025/05/01/intellect-2-a-reasoning-model-trained-through-globally-decentralized-reinforcement-learning.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/tensor/2025/05/01/kperfir-towards-an-open-and-compiler-centric-ecosystem-for-gpu-kernel-performance-tooling-on-modern-ai-workloads.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/05/01/llamarl-a-distributed-asynchronous-reinforcement-learning-framework-for-efficient-large-scale-llm-training.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/05/01/mimo-unlocking-the-reasoning-potential-of-language-model-from-pretraining-to-posttraining.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/05/01/moesd-unveil-speculative-decodings-potential-for-accelerating-sparse-moe.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/05/01/prism-unleashing-gpu-sharing-for-cost-efficient-multi-llm-serving.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2025/05/01/prorl-prolonged-reinforcement-learning-expands-reasoning-boundaries-in-large-language-models.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/05/01/qwen3-technical-report.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/05/01/recipes-for-pre-training-llms-with-mxfp8.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/05/01/sageattention3-microscaling-fp4-attention-for-inference-and-an-exploration-of-8-bit-training.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/diffusions/dllm/2025/05/01/sparse-videogen2-accelerate-video-generation-with-sparse-attention-via-semantic-aware-permutation.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2025/05/01/the-entropy-mechanism-of-reinforcement-learning-for-reasoning-language-models.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/05/01/tokenweave-efficient-compute-communication-overlap-for-distributed-llm-inference.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/06/01/compress-gather-and-recompute-reforming-long-context-processing-in-transformers.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/06/01/contextcache-context-aware-semantic-cache-for-multi-turn-queries-in-large-language-models.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/06/01/cost-efficient-llm-training-with-lifetime-aware-tensor-offloading-via-gpudirect-storage.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/system/2025/06/01/decomposing-craft-an-elementary-grammar-for-sharing-expertise-in-craft-workflows.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/06/01/dotsllm1-technical-report.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/06/01/flashdmoe-fast-distributed-moe-in-a-single-kernel.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/2025/06/01/gated-attention-for-large-language-models-non-linearity-sparsity-and-attention-sink-free.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/nccl/2025/06/01/improved-performance-and-monitoring-capabilities-with-nvidia-collective-communications-library-226.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/06/01/kvcache-cache-in-the-wild-characterizing-and-optimizing-kvcache-cache-at-a-large-cloud-provider.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/system/2025/06/01/leann-a-low-storage-overhead-vector-index.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/06/01/lia-a-single-gpu-llm-inference-acceleration-with-cooperative-amx-enabled-cpu-gpu-computation-and-cxl-offloading.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/06/01/meshslice-efficient-2d-tensor-parallelism-for-distributed-dnn-training.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2025/06/01/mirage-a-multi-level-superoptimizer-for-tensor-programs.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/2025/06/01/multipole-attention-for-efficient-long-context-reasoning.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/06/01/scaling-llama-3-training-with-efficient-parallelism-strategies.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/06/01/scaling-speculative-decoding-with-lookahead-reasoning.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/06/01/serving-large-language-models-on-huawei-cloudmatrix384.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/2025/06/01/spark-transformer-reactivating-sparsity-in-ffn-and-attention.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/06/01/streambp-memory-efficient-exact-backpropagation-for-long-sequence-training-of-llms.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/06/01/td-pipe-temporally-disaggregated-pipeline-parallelism-architecture-for-high-throughput-llm-inference.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/06/01/understanding-and-mitigating-numerical-sources-of-nondeterminism-in-llm-inference.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/06/01/yggdrasil-bridging-dynamic-speculation-and-static-runtime-for-latency-optimal-tree-based-llm-decoding.html</loc>
<lastmod>2025-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/07/01/asyncflow-an-asynchronous-streaming-rl-framework-for-efficient-llm-post-training.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/07/01/demystifying-nccl-an-in-depth-analysis-of-gpu-communication-protocols-and-algorithms.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/07/01/dissecting-the-nvidia-blackwell-architecture-with-microbenchmarks.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/07/01/distflow-a-fully-distributed-rl-framework-for-scalable-and-efficient-llm-post-training.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/compiler/graph/2025/07/01/elk-exploring-the-efficiency-of-inter-core-connected-ai-chips-with-deep-learning-compiler-techniques.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/nccl/2025/07/01/enabling-fast-inference-and-resilient-training-with-nccl-227.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/diffusions/dllm/2025/07/01/fast-dllm-training-free-acceleration-of-diffusion-llm-by-enabling-kv-cache-and-parallel-decoding.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2025/07/01/group-sequence-policy-optimization.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/07/01/helix-parallelism-rethinking-sharding-strategies-for-interactive-multi-million-token-llm-decoding.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/07/01/kvflow-efficient-prefix-caching-for-accelerating-llm-based-multi-agent-workflows.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/07/01/megascale-infer-serving-mixture-of-experts-at-scale-with-disaggregated-expert-parallelism.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/07/01/memagent-reshaping-long-context-llm-with-multi-conv-rl-based-memory-agent.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2025/07/01/overcoming-long-context-limitations-of-state-space-models-via-context-dependent-sparse-attention.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/07/01/scale-up-ethernet-framework-scale-up-ethernet-framework-specification.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/07/01/step-3-is-large-yet-affordable-model-system-co-design-for-cost-effective-decoding.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/07/01/zeco-zero-communication-overhead-sequence-parallelism-for-linear-attention.html</loc>
<lastmod>2025-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/08/01/an-extensible-software-transport-layer-for-gpu-networking.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/linear/2025/08/01/artificial-hippocampus-networks-for-efficient-long-context-modeling.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2025/08/01/clusterfusion-expanding-operator-fusion-scope-for-llm-inference-via-cluster-level-collective-primitive.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/08/01/fp4-all-the-way-fully-quantized-training-of-llms.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/08/01/glm-45-agentic-reasoning-and-coding-arc-foundation-models.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2025/08/01/hierarchical-reasoning-model.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/08/01/kimi-k2-open-agentic-intelligence.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/08/01/kling-omni-technical-report.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/08/01/longcat-flash-technical-report.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/eval/2025/08/01/mcp-bench-benchmarking-tool-using-llm-agents-with-complex-real-world-tasks-via-mcp-servers.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/08/01/mixture-of-contexts-for-long-video-generation.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/08/01/nvidia-nemotron-nano-2-an-accurate-and-efficient-hybrid-mamba-transformer-reasoning-model.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2025/08/01/on-policy-rl-meets-off-policy-experts-harmonizing-supervised-fine-tuning-and-reinforcement-learning-via-dynamic-weighting.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/08/01/optimus-accelerating-large-scale-multi-modal-llm-training-by-bubble-exploitation.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/08/01/rstar2-agent-agentic-reasoning-technical-report.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/08/01/seamlessflow-a-traineragent-isolation-rl-framework-achieving-bubble-free-pipelines-via-tag-scheduling.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/08/01/towards-efficient-and-practical-gpu-multitasking-in-the-era-of-llm.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/08/01/veomni-scaling-any-modality-model-training-with-model-centric-distributed-recipe-zoo.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/08/01/your-efficient-rl-framework-secretly-brings-you-offpolicy-rl-training.html</loc>
<lastmod>2025-08-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/09/01/accurate-kv-cache-eviction-via-anchor-direction-projection-for-efficient-llm-inference.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/09/01/categorical-foundations-for-cute-layouts.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/09/01/defeating-nondeterminism-in-llm-inference.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/09/01/diep-adaptive-mixture-of-experts-compression-through-differentiable-expert-pruning.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/09/01/effective-context-engineering-for-ai-agents.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/09/01/efficient-pre-training-of-llms-via-topology-aware-communication-alignment-on-more-than-9600-gpus.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/09/01/expert-as-a-service-towards-efficient-scalable-and-robust-large-scale-moe-serving.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/2025/09/01/fast-attention-mechanisms-a-tale-of-parallelism.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/diffusions/dllm/2025/09/01/fast-dllm-v2-efficient-block-diffusion-llm.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/09/01/flowmoe-a-scalable-pipeline-scheduling-framework-for-distributed-mixture-of-experts-training.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/09/01/learned-prefix-caching-for-efficient-llm-inference.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/09/01/let-the-llm-stick-to-its-strengths-learning-to-route-economical-llm.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/09/01/liquidgemm-hardware-efficient-w4a8-gemm-kernel-for-high-performance-llm-serving.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/09/01/longcat-flash-thinking-technical-report.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/09/01/mimo-audio-audio-language-models-are-few-shot-learners.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/09/01/pipelinerl-faster-on-policy-reinforcement-learning-for-long-sequence-generation.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/09/01/q-palette-fractional-bit-quantizers-toward-optimal-bit-allocation-for-efficient-llm-deployment.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/09/01/robust-llm-training-infrastructure-at-bytedance.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/09/01/scaling-llm-test-time-compute-with-mobile-npu-on-smartphones.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/09/01/seerattention-self-distilled-attention-gating-for-efficient-long-context-prefilling.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/09/01/sla-beyond-sparsity-in-diffusion-transformers-via-fine-tunable-sparselinear-attention.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/09/01/streamtensor-make-tensors-stream-in-dataflow-accelerators-for-llms.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/09/01/the-landscape-of-agentic-reinforcement-learning-for-llms-a-survey.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/09/01/transcending-cost-quality-tradeoff-in-agent-serving-via-session-awareness.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/09/01/ui-tars-2-technical-report-advancing-gui-agent-with-multi-turn-reinforcement-learning.html</loc>
<lastmod>2025-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/10/01/axcore-a-quantization-aware-approximate-gemm-unit-for-llm-inference.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/10/01/cage-curvature-aware-gradient-estimation-for-accurate-quantization-aware-training.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/10/01/chunkkv-semantic-preserving-kv-cache-compression-for-efficient-long-context-llm-inference.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/10/01/coruscant-co-designing-gpu-kernel-and-sparse-tensor-core-to-advocate-unstructured-sparsity-in-efficient-llm-inference.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/10/01/deepseek-ocr-contexts-optical-compression.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/cpu/2025/10/01/dram-fault-classification-through-large-scale-field-monitoring-for-robust-memory-ras-management.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/10/01/efficient-long-context-language-model-training-by-core-attention-disaggregation.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/10/01/from-tokens-to-layers-redefining-stall-free-scheduling-for-llm-serving-with-layered-prefill.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/10/01/griffin-effective-token-alignment-for-faster-speculative-decoding.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/10/01/hierarchical-balance-packing-towards-efficient-supervised-fine-tuning-for-long-context-llm.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/10/01/hybridep-scaling-expert-parallelism-to-cross-datacenter-scenario-via-hybrid-expertdata-transmission.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/10/01/kelle-co-design-kv-caching-and-edram-for-efficient-llm-serving-in-edge-computing.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/10/01/kimi-linear-an-expressive-efficient-attention-architecture.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/10/01/kvcomm-online-cross-context-kv-cache-communication-for-efficient-llm-based-multi-agent-systems.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/10/01/leveraging-chiplet-locality-for-efficient-memory-mapping-in-multi-chip-module-gpus.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/10/01/longcat-flash-omni-technical-report.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/10/01/mixtures-of-subspaces-for-bandwidth-efficient-context-parallel-training.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/10/01/mtraining-distributed-dynamic-sparse-attention-for-efficient-ultra-long-context-training.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/10/01/mtraining-efficient-distributed-training-for-ultra-long-contexts-via-dynamic-sparse-attention.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/10/01/mx-pushing-the-limits-of-microscaling-formats-for-efficient-large-language-model-serving.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/10/01/netzip-algorithmhardware-co-design-of-in-network-lossless-compression-for-distributed-large-model-training.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/10/01/optimizing-all-to-all-collective-communication-with-fault-tolerance-on-torus-networks.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2025/10/01/parallel-loop-transformer-for-efficient-test-time-computation-scaling.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/10/01/previewing-uccl-ep-flexible-and-efficient-expert-parallelism-for-cloud-and-beyond.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/10/01/rdma-point-to-point-communication-for-llm-systems.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/10/01/skipreduce-interconnection-network-sparsity-to-accelerate-distributed-machine-learning.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/10/01/speculate-deep-and-accurate-lossless-and-training-free-acceleration-for-offloaded-llms-via-substitute-speculative-decoding.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/10/01/stabilizing-moe-reinforcement-learning-by-aligning-training-and-inference-routers.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/10/01/stratum-system-hardware-co-design-with-tiered-monolithic-3d-stackable-dram-for-efficient-moe-serving.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/10/01/supermesh-energy-efficient-collective-communications-for-accelerators.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/10/01/synergistic-tensor-and-pipeline-parallelism.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/10/01/tail-optimized-caching-for-llm-inference.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/10/01/tasp-topology-aware-sequence-parallelism.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2025/10/01/tawa-automatic-warp-specialization-for-modern-gpus-with-asynchronous-references.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/10/01/towards-fully-fp8-gemm-llm-training-at-scale.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/10/01/when-to-reason-semantic-router-for-vllm.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/2025/10/01/why-low-precision-transformer-training-fails-an-analysis-on-flash-attention.html</loc>
<lastmod>2025-10-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/11/01/beat-the-long-tail-distribution-aware-speculative-decoding-for-rl-training.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/11/01/contextpilot-fast-long-context-inference-via-context-reuse.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/11/01/deepseek-v32-exp-boosting-long-context-efficiency-with-deepseek-sparse-attention.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/11/01/deterministic-inference-across-tensor-parallel-sizes-that-eliminates-traininginference-mismatch.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2025/11/01/evolm-in-search-of-lost-language-model-training-dynamics.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/11/01/farskip-collective-unhobbling-blocking-communication-in-mixture-of-experts-models.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/11/01/flashmoe-fast-distributed-moe-in-a-single-kernel.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/11/01/flexicache-leveraging-temporal-stability-of-attention-heads-for-efficient-kv-cache-management.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/11/01/fp8-flow-moe-a-casting-free-fp8-recipe-without-double-quantization-error.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/nccl/2025/11/01/fusing-communication-and-compute-with-new-device-api-and-copy-engine-collectives-in-nvidia-nccl-228.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/11/01/gemini-3-pro-model-card.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/11/01/gpu-initiated-networking-for-nccl.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/11/01/hunyuanocr-technical-report.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/11/01/intattention-a-fully-integer-attention-pipeline-for-efficient-edge-inference.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/11/01/kitty-accurate-and-efficient-2-bit-kv-cache-quantization-with-dynamic-channel-wise-precision-boost.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2025/11/01/nested-learning-the-illusion-of-deep-learning-architectures.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/11/01/opportunistic-expert-activation-batch-aware-expert-routing-for-faster-decode-without-retraining.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/11/01/quartet-native-fp4-training-can-be-optimal-for-large-language-models.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/11/01/seer-online-context-learning-for-fast-synchronous-llm-reinforcement-learning.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/11/01/suffixdecoding-extreme-speculative-decoding-for-emerging-ai-applications.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/11/01/system-card-claude-opus-45.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2025/11/01/tensor-parallelism-with-partially-synchronized-activations.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/11/01/tree-training-accelerating-agentic-llms-training-via-shared-prefix-reuse.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/2025/11/01/virtual-width-networks.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/2025/11/01/weight-sparse-transformers-have-interpretable-circuits.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/12/01/accelerating-large-scale-reasoning-model-inference-self-speculative-decoding-with-sparse-attention.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/2025/12/01/beluga-a-cxl-based-memory-architecture-for-scalable-and-efficient-llm-kvcache-management.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/sparsity/2025/12/01/blasst-dynamic-blocked-attention-sparsity-via-softmax-thresholding.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/low_precision/2025/12/01/codegemm-a-codebook-centric-approach-to-efficient-gemm-in-quantized-llms.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/12/01/deepseek-v32-pushing-the-frontier-of-open-large-language-models.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/12/01/dynapipe-dynamic-layer-redistribution-for-efficient-serving-of-llms-with-pipeline-parallelism.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/12/01/efficient-low-rank-attention-for-long-context-inference-in-large-language-models.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/12/01/mesh-attention-a-new-communication-efficient-distributed-attention-with-improved-data-locality.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/2025/12/01/mhc-manifold-constrained-hyper-connections.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/12/01/mimo-v2-flash-technical-report.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/compiler/2025/12/01/mirage-persistent-kernel-a-compiler-and-runtime-for-mega-kernelizing-tensor-programs.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2025/12/01/mma-sim-bit-accurate-reference-model-of-tensor-cores-and-matrix-cores.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2025/12/01/native-parallel-reasoner-reasoning-in-parallelism-via-self-distilled-reinforcement-learning.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/12/01/nemotron-3-nano-open-efficient-mixture-of-experts-hybrid-mamba-transformer-model-for-agentic-reasoning.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/12/01/nvidia-nemotron-3-efficient-and-open-intelligence.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2025/12/01/on-the-interplay-of-pre-training-mid-training-and-rl-on-reasoning-language-models.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/12/01/qwenlong-l15-post-training-recipe-for-long-context-reasoning-and-memory-management.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/diffusions/dllm/2025/12/01/radial-attention-on-log-n-sparse-attention-with-energy-decay-for-long-video-generation.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2025/12/01/rlax-large-scale-distributed-reinforcement-learning-for-large-language-models-on-tpus.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/12/01/seed18-model-card-towards-generalized-real-world-agency.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2025/12/01/skipkv-selective-skipping-of-kv-generation-and-storage-for-efficient-inference-with-large-reasoning-models.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2025/12/01/skrull-towards-efficient-long-context-fine-tuning-through-dynamic-data-scheduling.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2025/12/01/skyladder-better-and-faster-pretraining-via-context-window-scheduling.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2025/12/01/sonicmoe-accelerating-moe-with-io-and-tile-aware-optimizations.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2025/12/01/state-of-ai-an-empirical-100-trillion-token-study-with-openrouter.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/2025/12/01/tensor-product-attention-is-all-you-need.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/eval/2025/12/01/the-llm-evaluation-guidebook.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2025/12/01/towards-a-science-of-scaling-agent-systems.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2025/12/01/universal-reasoning-model.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2025/12/01/update-to-gpt-5-system-card-gpt-52.html</loc>
<lastmod>2025-12-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/pretrain_sft/2026/01/01/conditional-memory-via-scalable-lookup-a-new-axis-of-sparsity-for-large-language-models.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2026/01/01/dflash-block-diffusion-for-flash-speculative-decoding.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2026/01/01/fast-weight-product-key-memory.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2026/01/01/flashattention-t-towards-fully-tensorized-attention-by-exploiting-tensor-vector-parallelism.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2026/01/01/flashinfer-bench-building-the-virtuous-cycle-for-ai-driven-llm-systems.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2026/01/01/iquest-coder-v1-technical-report.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2026/01/01/jet-rl-enabling-on-policy-fp8-reinforcement-learning-with-unified-training-and-rollout-precision-flow.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2026/01/01/laps-a-length-aware-prefill-llm-serving-system.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2026/01/01/least-loaded-expert-parallelism-load-balancing-an-imbalanced-mixture-of-experts.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2026/01/01/llm-42-enabling-determinism-in-llm-inference-with-verified-speculation.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2026/01/01/mhc-lite-you-dont-need-20-sinkhorn-knopp-iterations.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/transformer-variant/2026/01/01/mhla-restoring-expressivity-of-linear-attention-via-token-level-multi-head.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2026/01/01/moeblaze-breaking-the-memory-wall-for-efficient-moe-training-on-modern-gpus.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2026/01/01/scaling-embeddings-outperforms-scaling-experts-in-language-models.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/framework/2026/01/01/vibetensor-system-software-for-deep-learning-fully-generated-by-ai-agents.html</loc>
<lastmod>2026-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2026/02/01/boute-cost-efficient-llm-serving-with-heterogeneous-llms-and-gpus-via-multi-objective-bayesian-optimization.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/gpu/2026/02/01/cuda-agent-large-scale-agentic-rl-for-high-performance-cuda-kernel-generation.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2026/02/01/dflash-block-diffusion-for-flash-speculative-decoding.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2026/02/01/dualpath-breaking-the-storage-bandwidth-bottleneck-in-agentic-llm-inference.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2026/02/01/echo-2-a-large-scale-distributed-rollout-framework-for-cost-efficient-reinforcement-learning.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2026/02/01/forge-scalable-agent-rl-framework-and-algorithm.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2026/02/01/glm-5-from-vibe-coding-to-agentic-engineering.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2026/02/01/kimi-k25-visual-agentic-intelligence.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2026/02/01/moe-spec-expert-budgeting-for-efficient-speculative-decoding.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2026/02/01/p-eagle-parallel-drafting-eagle-with-scalable-training.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2026/02/01/rlhfless-serverless-computing-for-efficient-rlhf.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2026/02/01/seed20-model-card-towards-intelligence-frontier-for-real-world-complexity.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2026/02/01/system-card-claude-opus-46.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/rl/2026/02/01/understanding-and-exploiting-weight-update-sparsity-for-communication-efficient-distributed-rl.html</loc>
<lastmod>2026-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2026/03/01/arl-tangram-unleash-the-resource-efficiency-in-agentic-reinforcement-learning.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2026/03/01/avo-agentic-variation-operators-for-autonomous-evolutionary-search.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2026/03/01/do-phone-use-agents-respect-your-privacy.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/attention/2026/03/01/flashattention-4-algorithm-and-kernel-pipelining-co-design-for-asymmetric-hardware-scaling.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2026/03/01/flashprefill-instantaneous-pattern-discovery-and-thresholding-for-ultra-fast-long-context-prefilling.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2026/03/01/heddle-a-distributed-orchestration-system-for-agentic-rl-rollout.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/ssm/2026/03/01/mamba-3-improved-sequence-modeling-using-state-space-principles.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/sparsity/2026/03/01/mixture-of-depths-attention.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/networking/nccl/2026/03/01/nccl-ep-towards-a-unified-expert-parallel-communication-api-for-nccl.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/train/2026/03/01/nest-network-and-memory-aware-device-placement-for-distributed-deep-learning.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2026/03/01/scalable-training-of-mixture-of-experts-models-with-megatron-core.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/speculative_decoding/2026/03/01/speculative-speculative-decoding.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/architecture/attention/2026/03/01/technical-report-of-attention-residuals.html</loc>
<lastmod>2026-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/rl/2026/03/12/rlax-large-scale-distributed-reinforcement-learning-for-large-language-models-on-tpus.html</loc>
<lastmod>2026-03-12T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/agent/2026/03/25/composer-2-technical-report.html</loc>
<lastmod>2026-03-25T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/mlsys/system/2026/03/25/modern-code-review-a-case-study-at-google.html</loc>
<lastmod>2026-03-25T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/2026/04/01/dwdp-distributed-weight-data-parallelism-for-high-performance-llm-inference-on-nvl72.html</loc>
<lastmod>2026-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/moe/2026/04/01/scalable-pretraining-of-large-mixture-of-experts-language-models-on-aurora-super-computer.html</loc>
<lastmod>2026-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/algorithm/models/2026/04/01/system-card-claude-mythos-preview.html</loc>
<lastmod>2026-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/papers/llm/engineering/inference/kvcache/2026/04/01/tokendance-scaling-multi-agent-llm-serving-via-collective-kv-cache-sharing.html</loc>
<lastmod>2026-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2007/01/01/optimizing-parallel-reduction-in-cuda.html</loc>
<lastmod>2007-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/networking/2018/01/01/rdma-tutorial.html</loc>
<lastmod>2018-01-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2018/03/01/programming-tensor-cores-native-volta-tensor-core-gemm.html</loc>
<lastmod>2018-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cutlass/2020/05/01/developing-cuda-kernels-to-push-tensor-cores-to-the-absolute-limit-on-nvidia-a100.html</loc>
<lastmod>2020-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/networking/2020/06/01/rdma-with-gpu-memory-via-dma-buf.html</loc>
<lastmod>2020-06-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/networking/2020/07/01/reexamining-direct-cache-access-to-optimize-io-intensive-applications-for-multi-hundred-gigabit-networks.html</loc>
<lastmod>2020-07-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2021/04/01/accelerating-convolution-with-tensor-cores-in-cutlass.html</loc>
<lastmod>2021-04-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/hpc/2022/02/01/standard-parallelism.html</loc>
<lastmod>2022-02-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2022/03/01/accelerating-backward-data-gradient-by-increasing-tensor-core-utilization-in-cutlass.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2022/03/01/automated-performance-improvement-using-cuda-link-time-optimization.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2022/03/01/cuda-new-features-and-beyond.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/network/2022/03/01/fast-inter-gpu-communication-with-nccl-for-deep-learning-training-and-more.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2022/03/01/how-cuda-programming-works.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2022/03/01/inside-the-nvidia-hopper-architecture.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/network/2022/03/01/multi-gpu-programming-with-mpi.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2022/03/01/optimizing-cuda-applications-for-nvidia-hopper-architecture.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/network/2022/03/01/s41825-latest-on-nvidia-magnum-io-gpudirect-technologies.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/framework/2022/03/01/tpat-tensorrt-plugin-autogen-tool.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/hpc/2022/03/01/warp-a-high-performance-python-framework-for-gpu-simulation-and-graphics.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/profile/2022/03/01/what-where-and-why-use-cuda-developer-tools-to-detect-locate-and-explain-bugs-and-bottlenecks.html</loc>
<lastmod>2022-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cutlass/2022/09/01/cutlass-python-api-enhancements-and-cutlass-30-preview.html</loc>
<lastmod>2022-09-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/network/2023/03/01/accelerating-data-movement-between-gpus-and-storage-or-memory.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/profile/2023/03/01/become-faster-in-writing-performant-cuda-kernels-using-the-source-page-in-nsight-compute.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2023/03/01/cuda-graphs-101.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2023/03/01/cuda-new-features-and-beyond.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda-math/2023/03/01/cunumeric-and-legate-how-to-create-a-distributed-gpu-accelerated-library.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/profile/2023/03/01/debugging-cuda-an-overview-of-cuda-correctness-tools.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cutlass/2023/03/01/developing-optimal-cuda-kernels-on-hopper-tensor-cores.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/network/2023/03/01/how-to-streamline-shared-memory-space-with-the-nvshmem-communication-library.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2023/03/01/how-to-write-a-cuda-program.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2023/03/01/increasing-data-center-efficiency-by-optimizing-gpu-utilization-session-id-s51297.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2023/03/01/optimizing-at-scale-investigating-hidden-bottlenecks-in-multi-node-workloads.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2023/03/01/programming-model-and-applications-for-grace-hopper-superchip.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda-math/2023/03/01/recent-developments-in-nvidia-math-libraries.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2023/03/01/robust-and-efficient-cuda-c-concurrency-with-stream-ordered-allocation.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/network/2023/03/01/s5111-scaling-deep-learning-training-fast-inter-gpu-communication-with-nccl.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/profile/2023/03/01/s51205-from-the-macro-to-the-micro-cuda-developer-tools-find-and-fix-problems-at-any-scale.html</loc>
<lastmod>2023-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2024/03/01/advanced-performance-optimization-in-cuda-s62192.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2024/03/01/cuda-new-features-and-beyond.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cutlass/2024/03/01/cutlass-a-performant-flexible-and-portable-way-to-target-hopper-tensor-cores.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda-math/2024/03/01/deep-dive-into-math-libraries.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2024/03/01/grace-hopper-superchip-architecture-and-performance-optimizations-for-ai-applications.html</loc>
<lastmod>2024-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/1001-ways-to-write-cuda-kernels-in-python.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/accelerated-python-the-community-and-ecosystem.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/application-optimization-for-nvidia-grace-cpu.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/triton/2025/03/01/blackwell-programming-for-the-masses-with-openai-triton.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/cuda-new-features-and-beyond.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/cuda-techniques-to-maximize-compute-and-instruction-throughput-s72685.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/cuda-techniques-to-maximize-memory-bandwidth-and-hide-latency-s72683.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/inference/2025/03/01/flashattention-3-fast-and-accurate-attention-with-asynchrony-and-low-precision.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/get-the-most-performance-from-grace-hopper.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/how-to-get-data-between-storage-and-the-gpu-at-the-speed-of-light.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/how-to-write-a-cuda-program-the-parallel-programming-edition.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/how-you-should-write-a-cuda-c-kernel.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/network/2025/03/01/inter-gpu-communication-technology.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/its-easier-than-you-think-debugging-and-optimizing-cuda-with-intelligent-developer-tools.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/performance-optimization-tutorial-part-3-s72686-cuda-techniques-to-maximize-concurrency-and-system-utilization.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/03/01/profiling-large-language-model-trainings-on-the-grace-hopper-superchip-using-nsight-systems.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cutlass/2025/03/01/programming-blackwell-tensor-cores-with-cute-and-cutlass.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/network/2025/03/01/s51882-become-faster-in-writing-performant-cuda-kernels-using-the-source-page-in-nsight-compute.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/the-cuda-c-developers-toolbox.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/the-cuda-python-developers-toolbox.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/03/01/the-performance-of-cuda-with-the-flexibility-of-pytorch.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cutlass/2025/03/01/use-cutlass-to-fuse-multiple-gemms-to-extreme-performance.html</loc>
<lastmod>2025-03-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/05/01/balancing-the-compute-throughput-latency-in-async-programming.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cutlass/2025/05/01/enable-tensor-core-programming-in-python-with-cutlass-40.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/05/01/fp8-training-recipes-performance-and-convergence.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/05/01/mcore-moe-in-2025-deepseek-v3-and-beyond.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/05/01/megatron-core-custom-fsdp.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/2025/05/01/optimizing-memory-bandwidth-and-latency-on-hopper-blackwell.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cuda/profile/2025/05/01/s72867-ai-developer-tools-for-accelerated-computing-scarce-data-isnt-scary.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/inference/2025/05/01/tensorrt-llm-pytorch-a-new-development-paradigm-for-high-performance-llm-inference.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/inference/2025/05/01/tensorrt-llm.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/inference/2025/05/01/tensorrt-llm%E9%A9%B1%E5%8A%A8deepseek%E6%80%A7%E8%83%BD%E6%9E%81%E9%99%90-%E5%8D%8F%E5%90%8C%E8%85%BE%E8%AE%AF%E8%81%94%E5%90%88%E4%BC%98%E5%8C%96%E5%AE%9E%E8%B7%B5.html</loc>
<lastmod>2025-05-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/inference/2025/11/01/a-practical-guide-to-deploying-nvfp4-for-efficient-inference-on-blackwell-gpus.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/inference/2025/11/01/best-practice-of-blackwell-gpu-deployment-in-the-chinese-market.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/11/01/best-practice-of-mla-kernel-optimization-on-blackwell.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/11/01/best-practices-of-reinforcement-learning-with-verl.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/11/01/cuda-profiling-and-debugging-tools-for-llm.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/2025/11/01/deepgemm-20-technical-overview.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/11/01/deepseek-v3-pre-training-optimization-on-grace-blackwell.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/11/01/distributed-implementation-of-muon-and-emerging-optimizers-in-megatron-core.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/11/01/hybrid-ep-an-efficient-moe-communication-implementation.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/inference/2025/11/01/linear-attention.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/train/2025/11/01/megatron-core-moe-updates-2025-h2.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/llm/engineering/inference/2025/11/01/tensorrt-llm-large-scale-expert-parallelism-optimizations.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/gpu/cutlass/2025/11/01/the-evolution-and-applications-of-cutedsl.html</loc>
<lastmod>2025-11-01T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/robotics/2026/04/15/batching-helpers-optimizing-loss-computation.html</loc>
<lastmod>2026-04-15T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/robotics/2026/04/15/bridge-the-sim2real-gap-with-neural-actuator.html</loc>
<lastmod>2026-04-15T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/recsys/2026/04/15/hstu-attention-development-and-optimization-using-cutlasscute.html</loc>
<lastmod>2026-04-15T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/robotics/2026/04/15/isaacsimlab-benchmark.html</loc>
<lastmod>2026-04-15T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/mlsys/networking/2026/04/15/rdma-aware-networks-programming-user-manual.html</loc>
<lastmod>2026-04-15T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/recsys/2026/04/15/recsys-example-hstu-model-training-and-inference-best-practice.html</loc>
<lastmod>2026-04-15T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/slides/robotics/2026/04/15/video-training-for-assistance-driving.html</loc>
<lastmod>2026-04-15T00:00:00+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/</loc>
</url>
<url>
<loc>https://www.papercache.org/collection.html</loc>
</url>
<url>
<loc>https://www.papercache.org/about/</loc>
</url>
<url>
<loc>https://www.papercache.org/account/favorites.html</loc>
</url>
<url>
<loc>https://www.papercache.org/feeds/</loc>
</url>
<url>
<loc>https://www.papercache.org/admin/</loc>
</url>
<url>
<loc>https://www.papercache.org/account/profile.html</loc>
</url>
<url>
<loc>https://www.papercache.org/auth/reset-password.html</loc>
</url>
<url>
<loc>https://www.papercache.org/auth/verify.html</loc>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/diffusions/dllm/2024-08-%5Bnips25%5D-PipeFusion-Patch-level-Pipeline-Parallelism-for-Diffusion-Transformers-Inference.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/diffusions/dllm/2025-05-%5Bnips25%5D-Accelerating-Diffusion-LLMs-via-Adaptive-Parallel-Decoding.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/diffusions/dllm/2025-05-%5Bnips25%5D-Sparse-VideoGen2-Accelerate-Video-Generation-with-Sparse-Attention-via-Semantic-Aware-Permutation.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/diffusions/dllm/2025-07-Fast-dLLM-Training-free-Acceleration-of-Diffusion-LLM-by-Enabling-KV-Cache-and-Parallel-Decoding.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/diffusions/dllm/2025-09-FAST-DLLM-V2-Efficient-Block-Diffusion-LLM.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/diffusions/dllm/2025-12-%5Bnips25%5D-Radial-Attention-On-log-n-Sparse-Attention-with-Energy-Decay-for-Long-Video-Generation.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2022-01-%5Bnips22%5D-Chain-of-Thought-Prompting-Elicits-Reasoning-in-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2022-07-%5Bnips22%5D-WebShop-Towards-Scalable-Real-World-Web-Interaction-with-Grounded-Language-Agents.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2023-02-%5Bnips23%5D-Toolformer-Language-Models-Can-Teach-Themselves-to-Use-Tools.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2023-03-%5Biclr23%5D-REAC-T-SYNERGIZING-REASONING-AND-ACTING-IN-LANGUAGE-MODELS.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2023-09-%5Bnips23%5D-Tree-of-Thoughts-Deliberate-Problem-Solving-with-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2023-10-FIREACT-TOWARD-LANGUAGE-AGENT-FINE-TUNING.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2023-12-Retrieval-Augmented-Generation-for-Large-Language-Models-A-Survey.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2024-09-Large-Language-Model-Based-Agents-for-Software-Engineering-A-Survey.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-03-R1-Searcher-Incentivizing-the-Search-Capability-in-LLMs-via-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-03-Search-R1-Training-LLMs-to-Reason-and-Leverage-Search-Engines-with-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-04-Mem0-Building-Production-Ready-AI-Agents-with-Scalable-Long-Term-Memory.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-05-A-Survey-on-Test-Time-Scaling-in-Large-Language-Models-What-How-Where-and-How-Well.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-06-%5Bnips25%5D-ContextCache-Context-Aware-Semantic-Cache-for-Multi-Turn-Queries-in-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-07-%5Bnips25%5D-MemAgent-Reshaping-Long-Context-LLM-with-Multi-Conv-RL-based-Memory-Agent.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-09-Effective-Context-Engineering-for-AI-Agents.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-09-The-Landscape-of-Agentic-Reinforcement-Learning-for-LLMs-A-Survey.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-09-UI-TARS-2-Technical-Report-Advancing-GUI-Agent-with-Multi-Turn-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-09-%5Bnips25%5D-Transcending-Cost-Quality-Tradeoff-in-Agent-Serving-via-Session-Awareness.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2025-12-Towards-a-Science-of-Scaling-Agent-Systems.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/agent/2026-03-Do-Phone-Use-Agents-Respect-Your-Privacy.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/2025-06-%5Bnips25%5D-Spark-Transformer-Reactivating-Sparsity-in-FFN-and-Attention.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/2025-09-%5Bnips25%5D-Fast-Attention-Mechanisms-A-Tale-of-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/2025-11-Virtual-Width-Networks.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/2025-11-Weight-sparse-Transformers-Have-Interpretable-Circuits.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/2025-04-%5Biclr25%5D-A-Little-Goes-a-Long-Way-Efficient-Long-Context-Training-and-Inference-with-Partial-Contexts.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/2025-06-Gated-Attention-for-Large-Language-Models-Non-linearity-Sparsity-and-Attention-Sink-Free.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/2025-06-%5Bnips25%5D-Multipole-Attention-for-Efficient-Long-Context-Reasoning.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/2025-10-Why-Low-Precision-Transformer-Training-Fails-An-Analysis-on-Flash-Attention.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/2025-12-%5Bnips25%5D-Tensor-Product-Attention-Is-All-You-Need.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/2025-12-mHC-Manifold-Constrained-Hyper-Connections.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2020-06-%5Bicml20%5D-Transformers-are-RNNs-Fast-Autoregressive-Transformers-with-Linear-Attention.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2021-02-%5Biclr21%5D-LEARNING-ASSOCIATIVE-INFERENCE-USING-FAST-WEIGHT-MEMORY.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2021-06-%5Bicml21%5D-Linear-Transformers-Are-Secretly-Fast-Weight-Programmers.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2022-01-%5Bpmlr22%5D-Transformer-Quality-in-Linear-Time.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2022-10-The-Devil-in-Linear-Transformer.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2023-07-Scaling-TransNormer-to-175-Billion-Parameters.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2023-12-Gated-Linear-Attention-Transformers-with-Hardware-Efficient-Training.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2024-01-Lightning-Attention-2-A-Free-Lunch-for-Handling-Unlimited-Sequence-Lengths-in-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2024-04-Linear-Attention-Sequence-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2024-05-Various-Lengths-Constant-Speed-Efficient-Language-Modeling-with-Lightning-Attention.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2025-01-MiniMax-01-Scaling-Foundation-Models-with-Lightning-Attention.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/linear/2025-08-Artificial-Hippocampus-Networks-for-Efficient-Long-Context-Modeling.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/sparsity/2020-04-Longformer-The-Long-Document-Transformer.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/sparsity/2025-02-Native-Sparse-Attention-Hardware-Aligned-and-Natively-Trainable-Sparse-Attention.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/sparsity/2025-02-%5Bnips25%5D-MOBA-MIXTURE-OF-BLOCK-ATTENTION-FOR-LONG-CONTEXT-LLMS.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/sparsity/2025-12-%5Bmlsys26%5D-BLASST-Dynamic-Blocked-Attention-Sparsity-via-Softmax-Thresholding.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2019-01-Transformer-XL-Attentive-Language-Models-Beyond-a-Fixed-Length-Context.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2021-10-Combining-Recurrent-Convolutional-and-Continuous-time-Models-with-Linear-State-Space-Layers.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2021-10-S4-Efficiently-Modeling-Long-Sequences-with-Structured-State-Spaces.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2023-03-S5-SIMPLIFIED-STATE-SPACE-LAYERS-FOR-SEQUENCE-MODELING.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2023-03-SIMPLIFIED-STATE-SPACE-LAYERS-FOR-SEQUENCE-MODELING.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2023-04-Hungry-Hungry-Hippos-Towards-Language-Modeling-with-State-Space-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2023-12-Mamba-Linear-Time-Sequence-Modeling-with-Selective-State-Spaces.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2024-02-MoE-Mamba-Efficient-Selective-State-Space-Models-with-Mixture-of-Experts.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2024-03-Jamba-A-Hybrid-Transformer-Mamba-Language-Model.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2024-05-Mamba2-Transformers-are-SSMs-Generalized-Models-and-Efficient-Algorithms-Through-Structured-State-Space-Duality.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2024-12-%5Biclr25%5D-GATED-DELTA-NETWORKS-IMPROVING-MAMBA2-WITH-DELTA-RULE.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/attention/ssm/2025-07-%5Bnips25%5D-Overcoming-Long-Context-Limitations-of-State-Space-Models-via-Context-Dependent-Sparse-Attention.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2018-07-Universal-Transformers.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2024-03-Scaling-up-Test-Time-Compute-with-Latent-Reasoning-A-Recurrent-Depth-Approach.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2024-07-Scaling-Laws-with-Vocabulary-Larger-Models-Deserve-Larger-Vocabularies.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2025-02-Reasoning-with-Latent-Thoughts-On-the-Power-of-Looped-Transformers.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2025-04-%5Biclr25%5D-Hyper-Connections.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2025-08-Hierarchical-Reasoning-Model.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2025-10-Parallel-Loop-Transformer-for-Efficient-Test-Time-Computation-Scaling.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2025-12-%5Bnips25%5D-Universal-Reasoning-Model.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2026-01-Conditional-Memory-via-Scalable-Lookup-A-New-Axis-of-Sparsity-for-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2026-01-Fast-weight-Product-Key-Memory.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2026-01-MHLA-Restoring-Expressivity-of-Linear-Attention-via-Token-Level-Multi-Head.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/architecture/transformer-variant/2026-01-mHC-lite-You-Dont-Need-20-Sinkhorn-Knopp-Iterations.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/eval/2020-09-%5Biclr21%5D-Measuring-Massive-Multitask-Language-Understanding.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/eval/2023-05-On-the-Tool-Manipulation-Capability-of-Open-source-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/eval/2023-11-GAIA-A-Benchmark-for-General-AI-Assistants.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/eval/2023-11-Instruction-Following-Evaluation-for-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/eval/2024-05-LiveCodeBench-Holistic-and-Contamination-Free-Evaluation-of-Large-Language-Models-for-Code.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/eval/2024-09-%5Biclr%5D-SWE-BENCH-Can-Language-Models-Resolve-Real-world-Github-Issues.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/eval/2025-08-MCP-Bench-Benchmarking-Tool-Using-LLM-Agents-with-Complex-Real-World-Tasks-via-MCP-Servers.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/eval/2025-12-The-LLM-Evaluation-Guidebook.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2023-02-LLaMA-Open-and-Efficient-Foundation-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2023-07-Llama-2-Open-Foundation-and-Fine-Tuned-Chat-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2024-01-DeepSeek-Coder-When-the-Large-Language-Model-Meets-Programming---The-Rise-of-Code-Intelligence.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2024-03-Gemini-15-Unlocking-multimodal-understanding-across-millions-of-tokens-of-context.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2024-06-DeepSeek-V2-A-Strong-Economical-and-Efficient-Mixture-of-Experts-Language-Model.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2024-07-The-Llama-3-Herd-of-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2024-12-DeepSeek-V3-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-01-KIMI-K15-SCALING-REINFORCEMENT-LEARNING-WITH-LLMS.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-04-Nemotron-H-A-Family-of-Accurate-and-Efficient-Hybrid-Mamba-Transformer-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-04-Seed15-Thinking-Advancing-Superb-Reasoning-Models-with-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-05-MiMo-Unlocking-the-Reasoning-Potential-of-Language-Model-From-Pretraining-to-Posttraining.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-05-Qwen3-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-06-dotsllm1-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-07-Step-3-is-Large-yet-Affordable-Model-system-Co-design-for-Cost-effective-Decoding.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-08-GLM-45-Agentic-Reasoning-and-Coding-ARC-Foundation-Models.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-08-KIMI-K2-OPEN-AGENTIC-INTELLIGENCE.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-08-Kling-Omni-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-08-LongCat-Flash-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:01+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-08-NVIDIA-Nemotron-Nano-2-An-Accurate-and-Efficient-Hybrid-Mamba-Transformer-Reasoning-Model.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-09-LongCat-Flash-Thinking-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-09-MiMo-Audio-Audio-Language-Models-are-Few-Shot-Learners.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-10-DeepSeek-OCR-Contexts-Optical-Compression.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-10-KIMI-LINEAR-AN-EXPRESSIVE-EFFICIENT-ATTENTION-ARCHITECTURE.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-10-LongCat-Flash-Omni-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-11-DeepSeek-V32-Exp-Boosting-Long-Context-Efficiency-with-DeepSeek-Sparse-Attention.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-11-Gemini-3-Pro-Model-Card.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-11-HunyuanOCR-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-11-System-Card-Claude-Opus-45.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-12-DeepSeek-V32-Pushing-the-Frontier-of-Open-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-12-MiMo-V2-Flash-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-12-NVIDIA-Nemotron-3-Efficient-and-Open-Intelligence.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-12-Nemotron-3-Nano-Open-Efficient-Mixture-of-Experts-Hybrid-Mamba-Transformer-Model-for-Agentic-Reasoning.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-12-QwenLong-L15-Post-Training-Recipe-for-Long-Context-Reasoning-and-Memory-Management.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-12-Seed18-Model-Card-Towards-Generalized-Real-World-Agency.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2025-12-Update-to-GPT-5-System-Card-GPT-52.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2026-01-IQuest-Coder-V1-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2026-01-Scaling-Embeddings-Outperforms-Scaling-Experts-in-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2026-02-GLM-5-From-Vibe-Coding-to-Agentic-Engineering.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2026-02-KIMI-K25-VISUAL-AGENTIC-INTELLIGENCE.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2026-02-Seed20-Model-Card-Towards-Intelligence-Frontier-for-Real-World-Complexity.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2026-02-System-Card-Claude-Opus-46.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/models/2026-04-System-Card-Claude-Mythos-Preview.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2013-12-WHAT-MAKES-GOOD-DATA-FOR-ALIGNMENT-A-COMPREHENSIVE-STUDY-OF-AUTOMATIC-DATA-SELECTION-IN-INSTRUCTION-TUNING.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2021-04-ROFORMER-ENHANCED-TRANSFORMER-WITH-ROTARY-POSITION-EMBEDDING.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2022-03-DeepNet-Scaling-Transformers-to-1000-Layers.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2022-03-Tensor-Programs-V-Tuning-Large-Neural-Networks-via-Zero-Shot-Hyperparameter-Transfer.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2022-03-Training-Compute-Optimal-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2024-05-%5Bnips24%5D-Stacking-Your-Transformers-A-Closer-Look-at-Model-Growth-for-Efficient-LLM-Pre-Training.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2024-08-Fusechat-Knowledge-Fusion-of-Chat-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2024-08-%5Biclr25%5D-INFERENCE-SCALING-LAWS-AN-EMPIRICAL-ANALYSIS-OF-COMPUTE-OPTIMAL-INFERENCE-FOR-LLM-PROBLEM-SOLVING.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2024-12-UNVEILING-THE-SECRET-RECIPE-A-GUIDE-FOR-SUPERVISED-FINE-TUNING-SMALL-LLMS.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2025-03-SampleMix-A-Sample-wise-Pre-training-Data-Mixing-Strategey-by-Coordinating-Data-Quality-and-Diversity.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2025-04-%5Biclr25%5D-How-Does-Critical-Batch-Size-Scale-in-Pretraining.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2025-11-EvoLM-In-Search-of-Lost-Language-Model-Training-Dynamics.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2025-11-Nested-Learning-The-Illusion-of-Deep-Learning-Architectures.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/2025-12-%5Bnips25%5D-SkyLadder-Better-and-Faster-Pretraining-via-Context-Window-Scheduling.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/pruning/2025-04-%5Biclr25%5D-Beware-of-Calibration-Data-for-Pruning-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/pretrain_sft/pruning/2025-04-%5Biclr25%5D-Probe-Pruning-Accelerating-LLMs-Through-Dynamic-Pruning-via-Model-Probing.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2013-12-Playing-Atari-with-Deep-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2015-07-Massively-Parallel-Methods-for-Deep-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2016-01-Mastering-the-game-of-Go-with-deep-neural-networks-and-tree-search.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2017-08-Proximal-Policy-Optimization-Algorithms.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2017-12-%5Bpmlr18%5D-RLlib-Abstractions-for-Distributed-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2020-09-%5Bnips20%5D-Learning-to-summarize-from-human-feedback.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2022-03-Training-language-models-to-follow-instructions-with-human-feedback.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2022-04-Training-a-Helpful-and-Harmless-Assistant-with-Reinforcement-Learning-from-Human-Feedback.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2023-05-%5Bnips23%5D-Direct-Preference-Optimization-Your-Language-Model-is-Secretly-a-Reward-Model.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2023-10-SteerLM-Attribute-Conditioned-SFT-as-an-User-Steerable-Alternative-to-RLHF.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2024-01-Self-Play-Fine-Tuning-Converts-Weak-Language-Models-to-Strong-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2024-02-DeepSeekMath-Pushing-the-Limits-of-Mathematical-Reasoning-in-Open-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2024-05-NeMo-Aligner-Scalable-Toolkit-for-Efficient-Model-Alignment.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2024-07-%5Bnips24%5D-HelpSteer2-Open-source-dataset-for-training-top-performing-reward-models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2024-08-Scaling-LLM-Test-Time-Compute-Optimally-can-be-More-Effective-than-Scaling-Model-Parameters.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2025-03-%5Bneurips25%5D-Tapered-Off-Policy-REINFORCE-Stable-and-Efficient-Reinforcement-Learning-for-LLMs.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2025-04-Exploring-Data-Scaling-Trends-and-Effects-in-Reinforcement-Learning-from-Human-Feedback.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2025-05-DAPO-An-Open-Source-LLM-Reinforcement-Learning-System-at-Scale.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2025-05-INTELLECT-2-A-Reasoning-Model-Trained-Through-Globally-Decentralized-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2025-05-ProRL-Prolonged-Reinforcement-Learning-Expands-Reasoning-Boundaries-in-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2025-05-The-Entropy-Mechanism-of-Reinforcement-Learning-for-Reasoning-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2025-07-Group-Sequence-Policy-Optimization.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2025-08-ON-POLICY-RL-MEETS-OFF-POLICY-EXPERTS-HARMONIZING-SUPERVISED-FINE-TUNING-AND-REINFORCEMENT-LEARNING-VIA-DYNAMIC-WEIGHTIN.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/algorithm/rl/2026-02-Understanding-and-Exploiting-Weight-Update-Sparsity-for-Communication-Efficient-Distributed-RL.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2019-11-Fast-Transformer-Decoding-One-Write-Head-is-All-You-Need.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2020-02-%5Bicml20%5D-Low-Rank-Bottleneck-in-Multi-head-Attention-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2022-07-%5Bnips22%5D-FlashAttention-Fast-and-Memory-Efficient-Exact-Attention-with-IO-Awareness.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2023-07-FlashAttention-2-Faster-Attention-with-Better-Parallelism-and-Work-Partitioning.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2023-09-%5Bpodc23%5D-DEEPSPEED-ULYSSES-SYSTEM-OPTIMIZATIONS-FOR-ENABLING-TRAINING-OF-EXTREME-LONG-SEQUENCE-TRANSFORMER-MODELS.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2023-10-Ring-Attention-with-Blockwise-Transformers-for-Near-Infinite-Context.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2023-12-GQA-Training-Generalized-Multi-Query-Transformer-Models-from-Multi-Head-Checkpoints.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2024-04-Leave-No-Context-Behind-Efficient-Infinite-Context-Transformers-with-Infini-attention.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2024-07-USP-A-Unified-Sequence-Parallelism-Approach-for-Long-Context-Generative-AI.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2024-07-%5Bnips24%5D-FlashAttention-3-Fast-and-Accurate-Attention-with-Asynchrony-and-Low-precision.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2024-10-DuoAttention-Efficient-Long-Context-LLM-Inference-with-Retrieval-and-Streaming-Heads.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2024-10-%5Biclr25%5D-SageAttention-Accurate-8-Bit-Attention-for-Plug-and-Play-Inference-Acceleration.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2024-11-SageAttention2-Efficient-Attention-with-Thorough-Outlier-Smoothing-and-Per-thread-INT4-Quantization.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-01-iclr25-DEFT-Decoding-with-Flash-Tree-Attention-for-Efficient-Tree-Structured-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-02-LASP-2-Rethinking-Sequence-Parallelism-for-Linear-Attention-and-its-Hybrid.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-02-TREE-ATTENTION-TOPOLOGY-AWARE-DECODING-FOR-LONG-CONTEXT-ATTENTION-ON-GPU-CLUSTERS.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-03-%5Bnips25%5D-Tiled-Flash-Linear-Attention-More-Efficient-Linear-RNN-and-xLSTM-Kernels.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-04-%5Biclr25%5D-Flashmask-Efficient-and-Rich-Mask-Extension-of-Flashattention.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-05-FlashMLA-ETAP-Efficient-Transpose-Attention-Pipeline-for-Accelerating-MLA-Inference-on-NVIDIA-H20-GPUs.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-05-%5Bnips25%5D-SageAttention3-Microscaling-FP4-Attention-for-Inference-and-An-Exploration-of-8-bit-Training.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-08-Mixture-of-Contexts-for-Long-Video-Generation.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-09-SLA-BEYOND-SPARSITY-IN-DIFFUSION-TRANSFORMERS-VIA-FINE-TUNABLE-SPARSELINEAR-ATTENTION.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-09-%5Bnips25%5D-SeerAttention-Self-distilled-Attention-Gating-for-Efficient-Long-context-Prefilling.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-12-Mesh-Attention-A-New-Communication-Efficient-Distributed-Attention-with-Improved-Data-Locality.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2025-12-%5Bnips25%5D-Skrull-Towards-Efficient-Long-Context-Fine-tuning-through-Dynamic-Data-Scheduling.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/attention/2026-01-FlashAttention-T-Towards-Fully-Tensorized-Attention-by-Exploiting-Tensor-Vector-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2018-10-%5Bosdi18%5D-TVM-An-Automated-End-to-End-Optimizing-Compiler-for-Deep-Learning.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2024-10-FLUX-FAST-SOFTWARE-BASED-COMMUNICATION-OVERLAP-ON-GPUS-THROUGH-KERNEL-FUSION.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2024-12-FLEX-ATTENTION-A-PROGRAMMING-MODEL-FOR-GENERATING-OPTIMIZED-ATTENTION-KERNELS.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2025-04-TileLang-A-Composable-Tiled-Programming-Model-for-AI-Systems.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2025-04-TileLink-Generating-Efficient-Compute-Communication-Overlapping-Kernels-using-Tile-Centric-Primitives.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2025-04-%5Biclr25%5D-ThunderKittens-Simple-Fast-and-Adorable-Kernels.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2025-05-%5Bppopp25%5D-FlashTensor-Optimizing-Tensor-Programs-by-Leveraging-Fine-grained-Tensor-Property.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2025-06-%5Bosdi25%5D-Mirage-A-Multi-Level-Superoptimizer-for-Tensor-Programs.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2025-08-%5Bnips25%5D-ClusterFusion-Expanding-Operator-Fusion-Scope-for-LLM-Inference-via-Cluster-Level-Collective-Primitive.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2025-10-Tawa-Automatic-Warp-Specialization-for-Modern-GPUs-with-Asynchronous-References.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/compiler/2025-12-Mirage-Persistent-Kernel-A-Compiler-and-Runtime-for-Mega-Kernelizing-Tensor-Programs.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2019-10-Transformers-State-of-the-Art-Natural-Language-Processing.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2022-07-%5Bosdi22%5D-Orca-A-Distributed-Serving-System-for-Transformer-Based-Generative-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2022-11-EFFICIENTLY-SCALING-TRANSFORMER-INFERENCE.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2023-08-SARATHI-Efficient-LLM-Inference-by-Piggybacking-Decodes-with-Chunked-Prefills.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2023-09-%5Bsosp23%5D-Efficient-Memory-Management-for-Large-Language-Model-Serving-with-PagedAttention.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2023-10-Flash-Decoding-for-long-context-inference.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2023-11-STRIPED-ATTENTION-FASTER-RING-ATTENTION-FOR-CAUSAL-TRANSFORMERS.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2023-12-%5Bnsdi25%5D-SuperServe-Fine-Grained-Inference-Serving-for-Unpredictable-Workloads.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-01-%5Bosdi24%5D-DistServe-Disaggregating-Prefill-and-Decoding-for-Goodput-optimized-Large-Language-Model-Serving.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-04-%5Basplos24%5D-Proteus-A-High-Throughput-Inference-Serving-System-with-Accuracy-Scaling.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-05-Efficient-Heterogeneous-Large-Language-Model-Decoding-with-Model-Attention-Disaggregation.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-05-Preble-Efficient-Distributed-Prompt-Scheduling-for-LLM-Serving.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-05-Splitwise-Efficient-Generative-LLM-Inference-Using-Phase-Splitting.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-07-Mooncake-A-KVCache-centric-Disaggregated-Architecture-for-LLM-Serving.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-07-%5Bnips24%5D-SGLang-Efficient-Execution-of-Structured-Language-Model-Programs.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-08-%5Bosdi25%5D-NanoFlow-Towards-Optimal-Large-Language-Model-Serving-Throughput.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-09-Mnemosyne-Parallelization-Strategies-for-Efficiently-Serving-Multi-Million-Context-Length-LLM-Inference-Requests-Without.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2024-12-%5Bmlsys26%5D-BatchLLM-Optimizing-Large-Batched-LLM-Inference-with-Global-Prefix-Sharing-and-Throughput-oriented-Token-Batching.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-01-%5Batc25%5D-QFactory-Accelerating-Quantized-Large-Language-Model-Serving-with-Qtile-Graphs.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-01-%5Batc25%5D-Weaver-Efficient-Multi-LLM-Serving-with-Attention-Offloading.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-04-Tilus-A-Virtual-Machine-for-Arbitrary-Low-Precision-GPGPU-Computation-in-LLM-Serving.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-04-%5Biclr25%5D-Fiddler-CPU-GPU-Orchestration-for-Fast-Inference-of-Mixture-of-Experts-Models.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-04-%5Biclr25%5D-VL-Cache-Sparsity-and-Modality-Aware-KV-Cache-Compression-for-Vision-Language-Model-Inference-Acceleration.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-04-%5Bmlsys26%5D-Helios-Adaptive-Model-and-Early-Exit-Selection-for-Efficient-LLM-Inference-Serving.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-05-%5Bisca25%5D-Insights-into-DeepSeek-V3-Scaling-Challenges-and-Reflections-on-Hardware-for-AI-Architectures.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-05-%5Bmlsys26%5D-TokenWeave-Efficient-Compute-Communication-Overlap-for-Distributed-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-05-gLLM-Global-Balanced-Pipeline-Parallelism-System-for-Distributed-LLM-Serving-with-Token-Throttling.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-06-%5Bisca25%5D-LIA-A-Single-GPU-LLM-Inference-Acceleration-with-Cooperative-AMX-Enabled-CPU-GPU-Computation-and-CXL-Offloading.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-06-%5Bnips25%5D-TD-Pipe-Temporally-Disaggregated-Pipeline-Parallelism-Architecture-for-High-Throughput-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-06-%5Bnips25%5D-Understanding-and-Mitigating-Numerical-Sources-of-Nondeterminism-in-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-07-Helix-Parallelism-Rethinking-Sharding-Strategies-for-Interactive-Multi-Million-Token-LLM-Decoding.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-07-MegaScale-Infer-Serving-Mixture-of-Experts-at-Scale-with-Disaggregated-Expert-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-07-%5Bnips25%5D-ZeCO-Zero-Communication-Overhead-Sequence-Parallelism-for-Linear-Attention.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-08-Towards-Efficient-and-Practical-GPU-Multitasking-in-the-Era-of-LLM.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-09-Defeating-Nondeterminism-in-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-09-Scaling-LLM-Test-Time-Compute-with-Mobile-NPU-on-Smartphones.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-09-%5Bnips25%5D-Let-the-LLM-Stick-to-Its-Strengths-Learning-to-Route-Economical-LLM.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-10-TASP-Topology-Aware-Sequence-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:02+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-10-When-to-Reason-Semantic-Router-for-vLLM.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-10-%5Bmlsys26%5D-From-Tokens-to-Layers-Redefining-Stall-Free-Scheduling-for-LLM-Serving-with-Layered-Prefill.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-12-%5Bblog%5D-State-of-AI-An-Empirical-100-Trillion-Token-Study-with-OpenRouter.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2025-12-%5Bnips25%5D-DynaPipe-Dynamic-Layer-Redistribution-for-Efficient-Serving-of-LLMs-with-Pipeline-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2026-01-FlashInfer-Bench-Building-the-Virtuous-Cycle-for-AI-Driven-LLM-Systems.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2026-01-LAPS-A-Length-Aware-Prefill-LLM-Serving-System.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2026-02-%5Bmlsys26%5D-BOUTE-Cost-Efficient-LLM-Serving-with-Heterogeneous-LLMs-and-GPUs-via-Multi-Objective-Bayesian-Optimization.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/2026-04-DWDP-Distributed-Weight-Data-Parallelism-for-High-Performance-LLM-Inference-on-NVL72.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2023-10-%5Bsigcomm24%5D-CacheGen-KV-Cache-Compression-and-Streaming-for-Fast-Large-Language-Model-Serving.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2024-04-%5Bmlsys24%5D-PROMPT-CACHE-MODULAR-ATTENTION-REUSE-FOR-LOW-LATENCY-INFERENCE.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2024-05-You-Only-Cache-Once-Decoder-Decoder-Architectures-for-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2024-05-%5Beurosys25%5D-CacheBlend-Fast-Large-Language-Model-Serving-for-RAG-with-Cached-Knowledge-Fusion.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2024-07-%5Batc24%5D-Cost-Efficient-Large-Language-Model-Serving-for-Multi-turn-Conversations-with-CachedAttention.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2024-10-Do-Large-Language-Models-Need-a-Content-Delivery-Network.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-02-%5Bnips25%5D-KVLINK-Accelerating-Large-Language-Models-via-Efficient-KV-Cache-Reuse.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-02-%5Bnips25%5D-Twilight-Adaptive-Attention-Sparsity-with-Hierarchical-Top-p-Pruning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-04-%5Biclr25%5D-Long-Context-Compression-with-Activation-Beacon.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-04-%5Biclr25%5D-Not-All-Heads-Matter-A-Head-Level-KV-Cache-Compression-Method-with-Integrated-Retrieval-and-Reasoning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-04-%5Biclr25%5D-OmniKV-Dynamic-Context-Selection-for-Efficient-Long-Context-LLMs.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-04-%5Biclr25%5D-RazorAttention-Efficient-KV-Cache-Compression-Through-Retrieval-Heads.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-04-%5Biclr25%5D-SCBENCH-A-KV-Cache-Centric-Analysis-of-Long-Context-Methods.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-04-%5Biclr25%5D-SqueezeAttention-2D-Management-of-KVCache-in-LLM-Inference-via-Layer-Wise-Optimal-Budget.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-05-Prism-Unleashing-GPU-Sharing-for-Cost-Efficient-Multi-LLM-Serving.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-05-%5Bisca25%5D-Ecco-Improving-Memory-Bandwidth-and-Capacity-for-LLMs-via-Entropy-aware-Cache-Compression.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-06-%5Batc25%5D-KVCache-Cache-in-the-Wild-Characterizing-and-Optimizing-KVCache-Cache-at-a-Large-Cloud-Provider.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-06-%5Bnips25%5D-Compress-Gather-and-Recompute-REFORMing-Long-Context-Processing-in-Transformers.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-07-KVFlow-Efficient-Prefix-Caching-for-Accelerating-LLM-Based-Multi-Agent-Workflows.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-09-%5Bnips25%5D-Accurate-KV-Cache-Eviction-via-Anchor-Direction-Projection-for-Efficient-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-09-%5Bnips25%5D-Learned-Prefix-Caching-for-Efficient-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-10-%5Bmicro25%5D-Kelle-Co-design-KV-Caching-and-eDRAM-for-Efficient-LLM-Serving-in-Edge-Computing.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-10-%5Bnips25%5D-ChunkKV-Semantic-Preserving-KV-Cache-Compression-for-Efficient-Long-Context-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-10-%5Bnips25%5D-KVCOMM-Online-Cross-context-KV-cache-Communication-for-Efficient-LLM-based-Multi-agent-Systems.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-10-%5Bnips25%5D-Tail-Optimized-Caching-for-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-11-Contextpilot-Fast-Long-context-Inference-Via-Context-Reuse.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-11-%5Bmlsys26%5D-FlexiCache-Leveraging-Temporal-Stability-of-Attention-Heads-for-Efficient-KV-Cache-Management.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-11-%5Bmlsys26%5D-Kitty-Accurate-and-Efficient-2-Bit-KV-Cache-Quantization-with-Dynamic-Channel-wise-Precision-Boost.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-12-%5Bmlsys26%5D-SkipKV-Selective-Skipping-of-KV-Generation-and-Storage-for-Efficient-Inference-with-Large-Reasoning-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2025-12-%5Bnips25%5D-Efficient-Low-Rank-Attention-for-Long-Context-Inference-in-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/kvcache/2026-04-TokenDance-Scaling-Multi-Agent-LLM-Serving-via-Collective-KV-Cache-Sharing.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2022-08-FP8-Quantization-The-Power-of-the-Exponent.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2022-09-FP8-FORMATS-FOR-DEEP-LEARNING.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2022-11-%5Bicml23%5D-SmoothQuant-Accurate-and-Efficient-Post-Training-Quantization-for-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2023-04-Stable-and-low-precision-training-for-large-scale-vision-language-models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2023-04-%5Bisca23%5D-With-Shared-Microexponents-A-Little-Shifting-Goes-a-Long-Way.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2023-05-%5Bicme24%5D-Integer-or-Floating-Point-New-Outlooks-for-Low-Bit-Quantization-on-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2023-06-FP8-versus-INT8-for-efficient-deep-learning-inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2023-06-%5Bmlsys24%5D-AWQ-ACTIVATION-AWARE-WEIGHT-QUANTIZATION-FOR-ON-DEVICE-LLM-COMPRESSION-AND-ACCELERATION.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2023-09-OCP-Microscaling-Formats-MX-Specification.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2024-01-FP6-LLM-Efficiently-Serving-Large-Language-Models-Through-FP6-Centric-Algorithm-System-Co-Design.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2024-02-Massive-Activations-in-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2024-08-%5Bisca25%5D-LUT-Tensor-Core-A-Software-Hardware-Co-Design-for-LUT-Based-Low-Bit-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2024-08-%5Bppopp25%5D-MARLIN-Mixed-Precision-Auto-Regressive-Parallel-Inference-on-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2024-11-%5Bisca25%5D-MicroScopiQ-Accelerating-Foundational-Models-through-Outlier-Aware-Microscaling-Quantization.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2024-12-%5Bmlsys26%5D-MixLLM-LLM-Quantization-with-Global-Mixed-precision-between-Output-features-and-Highly-efficient-System-Design.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-01-%5Bosdi25%5D-DecDEC-A-Systems-Approach-to-Advancing-Low-Bit-LLM-Quantization.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-03-%5Bisca25%5D-Oaken-Fast-and-Efficient-LLM-Serving-with-Online-Offline-Hybrid-KV-Cache-Quantization.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-04-Ozaki-Scheme-II-A-GEMM-oriented-emulation-of-floating-point-matrix-multiplication-using-an-integer-modular-technique.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-04-%5Biclr25%5D-CBQ-Cross-Block-Quantization-for-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-04-%5Biclr25%5D-Effective-Interplay-Between-Sparsity-and-Quantization-From-Theory-to-Practice.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-04-%5Biclr25%5D-Progressive-Mixed-Precision-Decoding-for-Efficient-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-04-%5Biclr25%5D-Scaling-Laws-for-Precision.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-04-%5Biclr26%5D-TurboQuant-Online-Vector-Quantization-with-Near-optimal-Distortion-Rate.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-05-Recipes-for-Pre-training-LLMs-with-MXFP8.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-09-LiquidGEMM-Hardware-Efficient-W4A8-GEMM-Kernel-for-High-Performance-LLM-Serving.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-09-%5Bnips25%5D-Q-Palette-Fractional-Bit-Quantizers-Toward-Optimal-Bit-Allocation-for-Efficient-LLM-Deployment.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-10-%5Bmicro25%5D-MX-Pushing-the-Limits-of-Microscaling-Formats-for-Efficient-Large-Language-Model-Serving.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-10-%5Bmlsys26%5D-CAGE-Curvature-Aware-Gradient-Estimation-for-Accurate-Quantization-Aware-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-11-%5Bmlsys26%5D-IntAttention-A-Fully-Integer-Attention-Pipeline-for-Efficient-Edge-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/low_precision/2025-12-%5Bnips25%5D-CodeGEMM-A-Codebook-Centric-Approach-to-Efficient-GEMM-in-Quantized-LLMs.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2018-11-Blockwise-Parallel-Decoding-for-Deep-Autoregressive-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2023-05-%5Bicml23%5D-Fast-Inference-from-Transformers-via-Speculative-Decoding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2024-01-EAGLE-Speculative-Sampling-Requires-Rethinking-Feature-Uncertainty.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2024-03-%5Biclr25%5D-DEFT-Decoding-with-Flash-Tree-Attention-for-Efficient-Tree-Structured-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2024-04-Better-Faster-Large-Language-Models-via-Multi-token-Prediction.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2024-06-EAGLE-2-Faster-Inference-of-Language-Models-with-Dynamic-Draft-Trees.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2024-06-MEDUSA-Simple-LLM-Inference-Acceleration-Framework-with-Multiple-Decoding-Heads.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2024-08-MAGICDEC-BREAKING-THE-LATENCY-THROUGHPUT-TRADEOFF-FOR-LONG-CONTEXT-GENERATION-WITH-SPECULATIVE-DECODING.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-02-%5Bnips25%5D-EasySpec-Layer-Parallel-Speculative-Decoding-for-Efficient-Multi-GPU-Utilization.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-03-%5Bnips25%5D-EAGLE-3-Scaling-up-Inference-Acceleration-of-Large-Language-Models-via-Training-Time-Test.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-04-%5Biclr25%5D-Block-Verification-Accelerates-Speculative-Decoding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-04-%5Biclr25%5D-Distributed-Speculative-Inference-DSI-Speculation-Parallelism-for-Provably-Faster-Lossless-Language-Model-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-04-%5Biclr25%5D-Faster-Cascades-via-Speculative-Decoding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-04-%5Biclr25%5D-Mixture-of-Attentions-for-Speculative-Decoding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-04-%5Biclr25%5D-Multi-Draft-Speculative-Sampling-Canonical-Decomposition-and-Theoretical-Limits.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-04-%5Biclr25%5D-SWIFT-On-the-Fly-Self-Speculative-Decoding-for-LLM-Inference-Acceleration.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-04-%5Biclr25%5D-Towards-Optimal-Multi-Draft-Speculative-Decoding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-05-%5Bnips25%5D-MoESD-Unveil-Speculative-Decodings-Potential-for-Accelerating-Sparse-MoE.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-06-%5Bnips25%5D-Scaling-Speculative-Decoding-with-Lookahead-Reasoning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-06-%5Bnips25%5D-Yggdrasil-Bridging-Dynamic-Speculation-and-Static-Runtime-for-Latency-Optimal-Tree-Based-LLM-Decoding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-10-%5Bnips25%5D-GRIFFIN-Effective-Token-Alignment-for-Faster-Speculative-Decoding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-10-%5Bnips25%5D-Speculate-Deep-and-Accurate-Lossless-and-Training-Free-Acceleration-for-Offloaded-LLMs-via-Substitute-Speculative-Decodi.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-11-%5Bmlsys26%5D-Beat-the-Long-Tail-Distribution-Aware-Speculative-Decoding-for-RL-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-11-%5Bnips25%5D-SuffixDecoding-Extreme-Speculative-Decoding-for-Emerging-AI-Applications.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-12-Native-Parallel-Reasoner-Reasoning-in-Parallelism-via-Self-Distilled-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2025-12-%5Bmlsys26%5D-Accelerating-Large-Scale-Reasoning-Model-Inference-Self-Speculative-Decoding-with-Sparse-Attention.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2026-01-LLM-42-Enabling-Determinism-in-LLM-Inference-with-Verified-Speculation.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2026-02-MoE-Spec-Expert-Budgeting-for-Efficient-Speculative-Decoding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2026-02-P-EAGLE-Parallel-Drafting-EAGLE-with-Scalable-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/inference/speculative_decoding/2026-DFlash-Block%20Diffusion%20for%20Flash%20Speculative%20Decoding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2021-12-%5Bicml22%5D-GLaM-Efficient-Scaling-of-Language-Models-with-Mixture-of-Experts.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2022-01-DeepSpeed-MoE-Advancing-Mixture-of-Experts-Inference-and-Training-to-Power-Next-Generation-AI-Scale.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2022-02-ST-MOE-DESIGNING-STABLE-AND-TRANSFERABLE-SPARSE-EXPERT-MODELS.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2022-06-%5Bmlsys22%5D-TUTEL-ADAPTIVE-MIXTURE-OF-EXPERTS-AT-SCALE.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2023-03-%5Bemnlp23%5D-Scaling-Vision-Language-Models-with-Sparse-Mixture-of-Experts.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2024-01-DeepSeekMoE-Towards-Ultimate-Expert-Specialization-in-Mixture-of-Experts-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2024-03-Scattered-Mixture-of-Experts-Implementation.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2024-04-%5Bicml25%5D-Shortcut-connected-Expert-Parallelism-for-Accelerating-Mixture-of-Experts.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2024-05-MEMoE-Enhancing-Model-Editing-with-Mixture-of-Experts-Adaptors.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2024-08-AUXILIARY-LOSS-FREE-LOAD-BALANCING-STRATEGY-FOR-MIXTURE-OF-EXPERTS.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2024-10-EPS-MoE-Expert-Pipeline-Scheduler-for-Cost-Efficient-MoE-Inference.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2024-10-%5Biclr25%5D-MOE-ACCELERATING-MIXTURE-OF-EXPERTS-METHODS-WITH-ZERO-COMPUTATION-EXPERTS.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-01-%5Batc25%5D-PopFetcher-Towards-Accelerated-Mixture-of-Experts-Training-Via-Popularity-Based-Expert-Wise-Prefetch.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-02-fMoE-Fine-Grained-Expert-Offloading-for-Large-Mixture-of-Experts-Serving.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-04-%5Biclr25%5D-Netmoe-Accelerating-Moe-Training-Through-Dynamic-Sample-Placement.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-06-FlashDMoE-Fast-Distributed-MoE-in-a-Single-Kernel.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-09-Expert-as-a-Service-Towards-Efficient-Scalable-and-Robust-Large-scale-MoE-Serving.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-09-%5Bnips25%5D-DiEP-Adaptive-Mixture-of-Experts-Compression-Through-Differentiable-Expert-Pruning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-09-%5Bnips25%5D-FlowMoE-A-Scalable-Pipeline-Scheduling-Framework-for-Distributed-Mixture-of-Experts-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-10-HybridEP-Scaling-Expert-Parallelism-to-Cross-Datacenter-Scenario-via-Hybrid-ExpertData-Transmission.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-10-Previewing-UCCL-EP-Flexible-and-Efficient-Expert-Parallelism-for-Cloud-and-Beyond.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-10-%5Bmicro25%5D-Stratum-%20System-Hardware%20Co-Design%20with%20Tiered%20Monolithic%203D-Stackable%20DRAM%20for%20Efficient%20MoE%20Serving.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-10-%5Bmicro25%5D-Stratum-System-Hardware-Co-Design-with-Tiered-Monolithic-3D-Stackable-DRAM-for-Efficient-MoE-Serving.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-11-OPPORTUNISTIC-EXPERT-ACTIVATION-BATCH-AWARE-EXPERT-ROUTING-FOR-FASTER-DECODE-WITHOUT-RETRAINING.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-11-%5Bmlsys26%5D-FP8-Flow-MoE-A-Casting-Free-FP8-Recipe-without-Double-Quantization-Error.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-11-%5Bmlsys26%5D-FarSkip-Collective-Unhobbling-Blocking-Communication-in-Mixture-of-Experts-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-11-%5Bnips25%5D-FlashMoE-Fast-Distributed-MoE-in-a-Single-Kernel.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2025-12-SonicMoE-Accelerating-MoE-with-IO-and-Tile-aware-Optimizations.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2026-01-Least-Loaded-Expert-Parallelism-Load-Balancing-an-Imbalanced-Mixture-of-Experts.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2026-01-%5Bmlsys26%5D-MoEBlaze-Breaking-the-Memory-Wall-for-Efficient-MoE-Training-on-Modern-GPUs.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2026-03-Scalable%20Training%20of%20Mixture-of-Experts%20Models%20with%20Megatron%20Core.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/moe/2026-04-Scalable-Pretraining-of-Large-Mixture-of-Experts-Language-Models-on-Aurora-Super-Computer.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2019-10-SEED-RL-SCALABLE-AND-EFFICIENT-DEEP-RL-WITH-ACCELERATED-CENTRAL-INFERENCE.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2022-06-%5Batc25%5D-GMI-DRL-Empowering-Multi-GPU-Deep-Reinforcement-Learning-with-GPU-Spatial-Multiplexing.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2023-03-DeepSpeed-Chat-Easy-Fast-and-Affordable-RLHF-Training-of-ChatGPT-like-Models-at-All-Scales.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2024-05-OpenRLHF-An-Easy-to-use-Scalable-and-High-performance-RLHF-Framework.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2024-09-RLHFuse-Efficient-RLHF-Training-for-Large-Language-Models-with-Inter--and-Intra-Stage-Fusion.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2024-10-%5Beurosys25%5D-HybridFlow-A-Flexible-and-Efficient-RLHF-Framework.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-04-DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-04-ReTool-Reinforcement-Learning-for-Strategic-Tool-Use-in-LLMs.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-04-StreamRL-Scalable-Heterogeneous-and-Elastic-RL-for-LLMs-with-Disaggregated-Stream-Generation.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-05-LlamaRL-A-Distributed-Asynchronous-Reinforcement-Learning-Framework-for-Efficient-Large-scale-LLM-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-05-%5Bnips25%5D-AREAL-A-Large-Scale-Asynchronous-Reinforcement-Learning-System-for-Language-Reasoning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-07-AsyncFlow-An-Asynchronous-Streaming-RL-Framework-for-Efficient-LLM-Post-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-07-DistFlow-A-Fully-Distributed-RL-Framework-for-Scalable-and-Efficient-LLM-Post-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-08-SeamlessFlow-A-TrainerAgent-Isolation-RL-Framework-Achieving-Bubble-Free-Pipelines-via-Tag-Scheduling.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-08-Your-Efficient-RL-Framework-Secretly-Brings-You-OffPolicy-RL-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-08-rStar2-Agent-Agentic-Reasoning-Technical-Report.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-09-PipelineRL-Faster-On-policy-Reinforcement-Learning-for-Long-Sequence-Generation.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-10-Stabilizing-MoE-Reinforcement-Learning-by-Aligning-Training-and-Inference-Routers.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-11-Deterministic-Inference-Across-Tensor-Parallel-Sizes-That-Eliminates-TrainingInference-Mismatch.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-11-Seer-Online-Context-Learning-for-Fast-Synchronous-LLM-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-11-Tree-Training-Accelerating-Agentic-LLMs-Training-via-Shared-Prefix-Reuse.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2025-12-RLAX-Large-Scale-Distributed-Reinforcement-Learning-for-Large-Language-Models-on-TPUs.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2026-01-Jet-RL-Enabling-On-Policy-FP8-Reinforcement-Learning-with-Unified-Training-and-Rollout-Precision-Flow.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2026-02-ECHO-2-A-Large-Scale-Distributed-Rollout-Framework-for-Cost-Efficient-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2026-02-Forge-Scalable-Agent-RL-Framework-and-Algorithm.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2026-02-RLHFless-Serverless-Computing-for-Efficient-RLHF.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2026-03-ARL-Tangram-Unleash-the-Resource-Efficiency-in-Agentic-Reinforcement-Learning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/rl/2026-03-Heddle-A-Distributed-Orchestration-System-for-Agentic-RL-Rollout.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2018-06-PipeDream-Fast-and-Efficient-Pipeline-Parallel-DNN-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2019-07-%5Bcvpr19%5D-GPipe-Easy-Scaling-with-Micro-Batch-Pipeline-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2020-03-Megatron-LM-Training-Multi-Billion-Parameter-Language-Models-Using-Model-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2020-03-%5Bsc20%5D-ZeRO-Memory-Optimizations-Toward-Training-Trillion-Parameter-Models.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2020-06-GShard-Scaling-Giant-Models-with-Conditional-Computation-and-Automatic-Sharding.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2021-01-%5Batc21%5D-ZeRO-Offload-Democratizing-Billion-Scale-Model-Training.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2021-02-%5BFAST21%5D-CheckFreq-Frequent-Fine-Grained-DNN-Checkpointing.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2021-04-%5Bsc21%5D-ZeRO-Infinity-Breaking-the-GPU-Memory-Wall-for-Extreme-Scale-Deep-Learning.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2021-06-%5Biclr22%5D-LORA-LOW-RANK-ADAPTATION-OF-LARGE-LANGUAGE-MODELS.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2021-07-%5Bsc21%5D-Chimera-Efficiently-Training-Large-Scale-Neural-Networks-with-Bidirectional-Pipelines.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2021-08-%5Bsc23%5D-Efficient-Large-Scale-Language-Model-Training-on-GPU-Clusters-Using-Megatron-LM.html</loc>
<lastmod>2026-04-15T17:29:03+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2022-04-%5Bjmlr23%5D-PaLM-Scaling-Language-Modeling-with-Pathways.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2022-05-%5Bmlsys22%5D-PATHWAYS-ASYNCHRONOUS-DISTRIBUTED-DATAFLOW-FOR-ML.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2022-05-%5Bppopp22%5D-FasterMoE-Modeling-and-Optimizing-Training-of-Large-Scale-Dynamic-Pre-Trained-Models.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2022-11-%5Bmlsys23%5D-ON-OPTIMIZING-THE-COMMUNICATION-OF-MODEL-PARALLELISM.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2023-04-PyTorch-FSDP-Experiences-on-Scaling-Fully-Sharded-Data-Parallel.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2023-10-Fault-Tolerant-Hybrid-Parallel-Training-at-Scale-with-Reliable-and-Efficient-In-memory-Checkpointing.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2023-10-%5Bmlsys26%5D-FlexTrain-A-Dynamic-Training-Framework-for-Heterogeneous-Devices-Environments.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2023-10-%5Bsosp23%5D-Gemini-Fast-Failure-Recovery-in-Distributed-Training-with-In-Memory-Checkpoints.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2023-11-ZERO-BUBBLE-PIPELINE-PARALLELISM.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-02-%5Bnsdi24%5D-MegaScale-Scaling-Large-Language-Model-Training-to-More-Than-10000-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-05-%5Bnips24%5D-Pipeline-Parallelism-with-Controllable-Memory.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-06-%5Batc25%5D-Universal-Checkpointing-A-Flexible-and-Efficient-Distributed-Checkpointing-System-for-Large-Scale-DNN-Training-with-Reco.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-06-%5Bmlsys26%5D-ProTrain-Efficient-LLM-Training-via-Adaptive-Memory-Management.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-07-Efficient-Training-of-Large-Language-Models-on-Distributed-Infrastructures-A-Survey.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-07-%5Batc24%5D-Accelerating-the-Training-of-Large-Language-Models-using-Efficient-Activation-Rematerialization-and-Optimal-Hybrid-Paral.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-08-%5Bsigcomm25%5D-DistTrain-Addressing-Model-and-Data-Heterogeneity-with-Disaggregated-Training-for-Multimodal-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-09-%5Bmlsys26%5D-HexiScale-Accommodating-Large-Language-Model-Training-over-Heterogeneous-Environment.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-09-%5Bosdi25%5D-Domino-Eliminating-Communication-in-LLM-Training-via-Generic-Tensor-Slicing-and-Overlapping.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2024-11-%5Bnsdi25%5D-Minder-Faulty-Machine-Detection-for-Large-scale-Distributed-Model-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-01-%5Batc25%5D-FlexPipe-Maximizing-Training-Efficiency-for-Transformer-based-Models-with-Variable-Length-Inputs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-01-%5Batc25%5D-Jenga-Enhancing-LLM-Long-Context-Fine-tuning-with-Contextual-Token-Sparsity.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-01-%5Batc25%5D-Obscura-Concealing-Recomputation-Overhead-in-Training-of-Large-Language-Models-with-Bubble-filling-Pipeline-Transformati.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-01-%5Bnsdi25%5D-Accelerating-Design-Space-Exploration-for-LLM-Training-Systems-with-Multi-experiment-Parallel-Simulation.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-01-%5Bosdi25%5D-Enabling-Efficient-GPU-Communication-over-Multiple-NICs-with-FuseLink.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-01-%5Bosdi25%5D-Zen-Empowering-Distributed-Training-with-Sparsity-driven-Data-Synchronization.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-02-ByteScale-Efficient-Scaling-of-LLM-Training-with-a-2048K-Context-Length-on-More-Than-12000-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-02-Training-LLMs-with-MXFP4.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-02-%5Bmlsys26%5D-DreamDDP-Accelerating-Data-Parallel-Distributed-LLM-Training-with-Layer-wise-Scheduled-Partial-Synchronization.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-02-%5Bppopp25%5D-Mario-Near-Zero-cost-Activation-Checkpointing-in-Pipeline-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-02-%5Bppopp25%5D-WeiPipe-Weight-Pipeline-Parallelism-for-Communication-Effective-Long-Context-Large-Model-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-03-Numerical-Error-Analysis-of-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-03-%5Bnips25%5D-Communication-Efficient-Language-Model-Training-Scales-Reliably-and-Robustly-Scaling-Laws-for-DiLoCo.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-03-%5Bosdi25%5D-Understanding-Stragglers-in-Large-Model-Training-Using-What-if-Analysis.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-03-%5Bosdi25%5D-WLB-LLM-Workload-Balanced-4D-Parallelism-for-Large-Language-Model-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-04-%5Biclr25%5D-Scaling-FP8-Training-to-Trillion-Token-LLMs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-04-%5Biclr25%5D-TORCHTITAN-One-Stop-PyTorch-Native-Solution-for-Production-Ready-LLM-Pretraining.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-04-%5Bnsdi25%5D-ByteCheckpoint-A-Unified-Checkpointing-System-for-Large-Foundation-Model-Development.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-04-%5Bnsdi25%5D-SimAI-Unifying-Architecture-Design-and-Performance-Tuning-for-Large-Scale-Large-Language-Model-Training-with-Scalability.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-06-%5Bisca25%5D-MeshSlice-Efficient-2D-Tensor-Parallelism-for-Distributed-DNN-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-06-%5Bisca25%5D-Scaling-Llama-3-Training-with-Efficient-Parallelism-Strategies.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-06-%5Bnips25%5D-Cost-Efficient-LLM-Training-with-Lifetime-Aware-Tensor-Offloading-via-GPUDirect-Storage.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-06-%5Bnips25%5D-StreamBP-Memory-Efficient-Exact-Backpropagation-for-Long-Sequence-Training-of-LLMs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-08-VeOmni-Scaling-Any-Modality-Model-Training-with-Model-Centric-Distributed-Recipe-Zoo.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-08-%5Batc25%5D-Optimus-Accelerating-Large-Scale-Multi-Modal-LLM-Training-by-Bubble-Exploitation.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-08-%5Bnips25%5D-FP4-All-the-Way-Fully-Quantized-Training-of-LLMs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-09-Robust-LLM-Training-Infrastructure-at-ByteDance.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-09-%5Bnips25%5D-Efficient-Pre-Training-of-LLMs-via-Topology-Aware-Communication-Alignment-on-More-Than-9600-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-10-MTraining-Efficient-Distributed-Training-for-Ultra-Long-Contexts-via-Dynamic-Sparse-Attention.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-10-%5Bmlsys26%5D-MTraining-Distributed-Dynamic-Sparse-Attention-for-Efficient-Ultra-Long-Context-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-10-%5Bnips25%5D-Hierarchical-Balance-Packing-Towards-Efficient-Supervised-Fine-tuning-for-Long-Context-LLM.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-10-%5Bnips25%5D-Mixtures-of-Subspaces-for-Bandwidth-Efficient-Context-Parallel-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-10-%5Bnips25%5D-Synergistic-Tensor-and-Pipeline-Parallelism.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-10-%5Bnips25%5D-Towards-Fully-FP8-GEMM-LLM-Training-at-Scale.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-11-%5Bnips25%5D-Quartet-Native-FP4-Training-Can-Be-Optimal-for-Large-Language-Models.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2025-11-%5Bnips25%5D-Tensor-Parallelism-with-Partially-Synchronized-Activations.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/llm/engineering/train/2026-03-%5Bmlsys26%5D-Nest-Network--and-Memory-Aware-Device-Placement-for-Distributed-Deep-Learning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/2023-02-%5Bcgo23%5D-To-Pack-or-Not-to-Pack-A-Generalized-Packing-Analysis-and-Transformation.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/2024-01-%5Bcgo24%5D-PolyTOPS-Reconfigurable-and-Flexible-Polyhedral-Scheduler.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/2024-03-depyf-Open-the-Opaque-Box-of-PyTorch-Compiler-for-Machine-Learning-Researchers.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2020-09-FusionStitching-Boosting-Memory-Intensive-Computations-for-Deep-Learning-Workloads.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2021-02-%5Bcgo21%5D-MLIR-Scaling-Compiler-Infrastructure-for-Domain-Specific-Computation.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2021-02-%5Bcgo21%5D-Progressive-Raising-in-Multi-level-IR.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2021-08-%5Bsigplan21%5D-DNNFusion-Accelerating-Deep-Neural-Networks-Execution-with-Advanced-Operator-Fusion.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2021-12-%5Bmlsys22%5D-TORCHFX-PRACTICAL-PROGRAM-CAPTURE-AND-TRANSFORMATION-FOR-DEEP-LEARNING-IN-PYTHON.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2022-02-%5Basplos22%5D-AStitch-Enabling-a-New-Multi-dimensional-Optimization-Space-for-Memory-Intensive-ML-Training-and-Inference-on-Modern-SIM.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2022-02-%5Btpds22%5D-NeoFlow-A-Flexible-Framework-for-Enabling-Efficient-Compilation-for-High-Performance-DNN-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2022-05-%5Bmlsys22%5D-DIETCODE-AUTOMATIC-OPTIMIZATION-FOR-DYNAMIC-TENSOR-PROGRAMS.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2022-07-%5Bosdi22%5D-Alpa-Automating-Inter--and-Intra-Operator-Parallelism-for-Distributed-Deep-Learning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2022-07-%5Bosdi22%5D-Microsecond-scale-Preemption-for-Concurrent-GPU-accelerated-DNN-Inferences.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2022-07-%5Bosdi22%5D-Unity-Accelerating-DNN-Training-Through-Joint-Optimization-of-Algebraic-Transformations-and-Parallelization.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2022-09-%5Bmlsys22%5D-APOLLO-AUTOMATIC-PARTITION-BASED-OPERATOR-FUSION-THROUGHLAYER-BY-LAYER-OPTIMIZATION.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2023-01-%5Bcgo24%5D-oneDNN-Graph-Compiler-A-Hybrid-Approach-for-High-Performance-Deep-Learning-Compilation.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2023-05-%5Bmlsys23%5D-AUTOSCRATCH-ML-OPTIMIZED-CACHE-MANAGEMENT-FOR-INFERENCE-ORIENTED-GPUS.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2023-05-%5Bmlsys23%5D-SIRIUS-HARVESTING-WHOLE-PROGRAM-OPTIMIZATION-OPPORTUNITIESFOR-DNNS.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2023-05-%5Bmlsys24%5D-ACROBAT-OPTIMIZING-AUTO-BATCHING-OF-DYNAMIC-DEEP-LEARNING-AT-COMPILE-TIME.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2023-07-%5Bosdi23%5D-Effectively-Scheduling-Computational-Graphs-of-Deep-Neural-Networks-toward-Their-Domain-Specific-Accelerators.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2023-07-%5Bosdi23%5D-Optimizing-Dynamic-Neural-Networks-with-Brainstorm.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2024-02-%5Basplos24%5D-SoD2-Statically-Optimizing-Dynamic-Deep-Neural-Network-Execution.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2024-04-%5Basplos24%5D-MAGIS-Memory-Optimization-via-Coordinated-Graph-Transformation-and-Scheduling-for-DNN.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2024-04-%5Basplos24%5D-PyTorch-2-Faster-Machine-Learning-Through-Dynamic-Python-Bytecode-Transformation-and-Graph-Compilation.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2024-07-%5Batc24%5D-MagPy-Compiling-Eager-Mode-DNN-Programs-by-Monitoring-Execution-States.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2025-07-%5Bmicro25%5D-ELK-%20Exploring%20the%20Efficiency%20of%20Inter-core%20Connected%20AI%20Chips%20with%20Deep%20Learning%20Compiler%20Techniques%20.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/graph/2025-07-%5Bmicro25%5D-Elk-Exploring-the-Efficiency-of-Inter-core-Connected-AI-Chips-with-Deep-Learning-Compiler-Techniques.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2018-06-Tensor-Comprehensions-Framework-Agnostic-High-Performance-Machine-Learning-Abstractions.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2020-11-%5Bosdi20%5D-Ansor-Generating-High-Performance-Tensor-Programs-for-Deep-Learning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2021-01-%5Bcgo21%5D-UNIT-Unifying-Tensorized-Instruction-Compilation.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2021-07-%5Bosdi21%5D-Pet-Optimizing-Tensor-Programs-with-Partially-Equivalent-Transformations-and-Automated-Corrections.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2021-10-%5Bmlsys22%5D-BOLT-BRIDGING-THE-GAP-BETWEEN-AUTO-TUNERS-AND-HARDWARE-NATIVE-PERFORMANCE.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2022-01-%5Bcgo22%5D-A-Compiler-Framework-for-Optimizing-Dynamic-parallelism-on-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2022-07-%5Bosdi22%5D-Roller-Fast-and-Efficient-Tensor-Compilation-for-Deep-Learning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2022-10-%5Basplos23%5D-Hidet-Task-Mapping-Programming-Paradigm-for-Deep-Learning-Tensor-Programs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2022-10-%5Basplos23%5D-TensorIR-An-Abstraction-for-Automatic-Tensorized-Program-Optimization.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2022-11-%5Basplos23%5D-TLP-A-Deep-Learning-based-Cost-Model-for-Tensor-Program-Tuning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2023-03-%5Basplos23%5D-Graphene-An-IR-for-Optimized-Tensor-Computations-on-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2023-07-POWERFUSION-A-Tensor-Compiler-with-Explicit-Data-Movement-Description-and-Instruction-level-Graph-IR.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2023-07-%5Bosdi23%5D-Cocktailer-Analyzing-and-Optimizing-Dynamic-Control-Flow-in-Deep-Learning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2023-07-%5Bosdi23%5D-EinNet-Optimizing-Tensor-Programs-with-Derivation-Based-Transformations.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2023-07-%5Bosdi23%5D-Welder-Scheduling-Deep-Learning-Memory-Access-via-Tile-graph.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2023-10-%5Bcgo24%5D-Tackling-the-Matrix-Multiplication-Micro-kernel-Generation-with-EXO.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2023-12-%5Bcgo23%5D-Experiences-Building-an-MLIR-based-SYCL-Compiler.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2023-12-%5Bcgo24%5D-JITSPMM-Just-in-Time-Instruction-Generation-for-Accelerated-Sparse-Matrix-Matrix-Multiplication.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2024-01-%5Basplos24%5D-Optimal-Kernel-Orchestration-for-Tensor-Programs-with-Korch.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2024-04-%5Basplos24%5D-Felix-Optimizing-Tensor-Programs-with-Gradient-Descent.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2024-04-%5Basplos24%5D-Hydride-A-Retargetable-and-Extensible-Synthesis-based-Compiler-for-Modern-Hardware-Architectures.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2024-04-%5Basplos24%5D-Optimizing-Deep-Learning-Inference-via-Global-Analysis-and-Tensor-Expressions.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2024-04-%5Basplos24%5D-Optimizing-Dynamic-Shape-Neural-Networks-on-Accelerators-via-On-the-Fly-Micro-Kernel-Polymerization.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2024-07-%5Bosdi24%5D-Enabling-Tensor-Language-Model-to-Assist-in-Generating-High-Performance-Tensor-Programs-for-Deep-Learning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2024-07-%5Bosdi24%5D-LADDER-Enabling-Efficient-Low-Precision-Deep-Learning-Computing-through-Hardware-aware-Tensor-Transformation.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2024-09-%5Bcgo24%5D-PresCount-Effective-Register-Allocation-for-Bank-Conflict-Reduction.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2025-01-%5Bosdi25%5D-PipeThreader-Software-Defined-Pipelining-for-Efficient-DNN-Execution.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2025-05-%5Bosdi25%5D-KPerfIR-Towards-an-Open-and-Compiler-centric-Ecosystem-for-GPU-Kernel-Performance-Tooling-on-Modern-AI-Workloads.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/compiler/tensor/2025-09-%5Bmicro25%5D-StreamTensor-%20Make%20Tensors%20Stream%20in%20Dataflow%20Accelerators%20for%20LLMs%20.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/cpu/2022-05-Everything-You-Need-to-Know-About-the-CPU-Power-Management.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/cpu/2022-05-Understanding-BIOS-Configuration-for-Performance-Tuning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/cpu/2025-10-%5Bmicro25%5D-DRAM-Fault-Classification-through-Large-Scale-Field-Monitoring-for-Robust-Memory-RAS-Management.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/framework/2022-01-%5Bosdi25%5D-Campo-Cost-Aware-Performance-Optimization-for-Mixed-Precision-Neural-Network-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/framework/2026-01-VibeTensor-System-Software-for-Deep-Learning-Fully-Generated-by-AI-Agents.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2009-04-Roofline-An-Insightful-Visual-Performance-Model-for-Multicore-Architectures.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2010-01-Demystifying-GPU-Microarchitecture-through-Microbenchmarking.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2016-01-Single-pass-Parallel-Prefix-Scan-with-Decoupled-Look-back.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2016-04-Optimizing-Performance-of-Recurrent-Neural-Networks-on-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2017-04-%5Bsigarch17%5D-Locality-Aware-CTA-Clustering-for-Modern-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2017-05-Offloading-communication-control-logic-in-GPU-accelerated-applications.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2017-10-Optimizing-Cache-Bypassing-and-Warp-Scheduling-for-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2018-03-Improving-Real-Time-Performance-with-CUDA-Persistent-Threads-CuPer-on-the-Jetson-TX2.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2018-04-%5Bjpdc18%5D-GPUDirect-Async-Exploring-GPU-synchronous-communication-techniques-for-InfiniBand-clusters.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2020-02-GPU-Initiated-OpenSHMEM-Correct-and-Eicient-Intra-Kernel-Networking-for-dGPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2021-02-%5Bcgo21%5D-C-for-Metal-High-Performance-SIMD-Programming-on-Intel-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2022-01-NVIDIA-H100-Tensor-Core-GPU-Architecture.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2022-01-%5Bcgo22%5D-DARM-Control-Flow-Melding-for-SIMT-Thread-Divergence-Reduction.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2022-06-%5Bics22%5D-Efficiently-Emulating-High-Bitwidth-Computation-with-Low-Bitwidth-Hardware.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2023-01-%5Bppopp23%5D-Stream-K-Work-centric-Parallel-Decomposition-for-Dense-Matrix-Matrix-Multiplication-on-the-GPU.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2023-01-%5Bvlsi23%5D-A-135-GBpsGbit-066-pJbit-Stacked-Embedded-DRAM-with-Multilayer-Arrays-by-Fine-Pitch-Hybrid-Bonding-and-Mini-TSV.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2023-05-%5Bcgo24%5D-A-Framework-for-Fine-Grained-Synchronization-of-Dependent-GPU-Kernels.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2023-05-%5Bmlsys23%5D-ALCOP-Automatic-Load-COmpute-Pipelining-in-Deep-Learning-Compiler-for-AI-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2023-05-%5Brtas23%5D-Hardware-Compute-Partitioning-on-NVIDIA-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2024-03-%5Bhpca24%5D-WASP-Exploiting-GPU-Pipeline-Parallelism-with-Hardware-Accelerated-Automatic-Warp-Specialization.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2024-06-%5Bisca24%5D-Mind-the-Gap-Attainable-Data-Movement-and-Operational-Intensity-Bounds-for-Tensor-Algorithms.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2024-07-NVIDIA-Blackwell-Architecture-Technical-Brief.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2024-09-%5Bcgo24%5D-Retargeting-and-Respecializing-GPU-Workloads-for-Performance-Portability.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-01-NVIDIA-Blackwell.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-01-NVIDIA-DGX-B300.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-01-NVIDIA-RTX-BLACKWELL-GPU-ARCHITECTURE.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-03-%5Bmicro25%5D-Dissecting-and-Modeling-the-Architecture-of-Modern-GPU-Cores.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-03-%5Bosdi25%5D-Neutrino-Fine-grained-GPU-Kernel-Profiling-via-Programmable-Probing.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-06-Serving-Large-Language-Models-on-Huawei-CloudMatrix384.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-07-Dissecting-the-NVIDIA-Blackwell-Architecture-with-Microbenchmarks.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-09-Categorical-Foundations-for-CuTe-Layouts.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-09-%5Bmicro25%5D-StreamTensor-Make-Tensors-Stream-in-Dataflow-Accelerators-for-LLMs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-10-%5Bmicro25%5D-Coruscant-%20Co-Designing%20GPU%20Kernel%20and%20Sparse%20Tensor%20Core%20to%20Advocate%20Unstructured%20Sparsity%20in%20Efficient%20LLM%20Inference%20.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-10-%5Bmicro25%5D-DRAM%20Fault%20Classification%20through%20Large-Scale%20Field%20Monitoringfor%20Robust%20Memory%20RAS%20Management.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-10-%5Bmicro25%5D-Leveraging%20Chiplet-Locality%20for%20Efficient%20Memory%20Mapping%20in%20Multi-Chip%20Module%20GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-10-%5Bmicro25%5D-Leveraging-Chiplet-Locality-for-Efficient-Memory-Mapping-in-Multi-Chip-Module-GPUs.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-12-MMA-Sim-Bit-Accurate-Reference-Model-of-Tensor-Cores-and-Matrix-Cores.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2025-%5Bmicro25%5D-Dissecting%20and%20Modeling%20the%20Architecture%20of%20Modern%20GPU%20Cores.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/gpu/2026-02-CUDA-Agent-Large-Scale-Agentic-RL-for-High-Performance-CUDA-Kernel-Generation.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2014-11-Introducing-data-center-fabric-the-next-generation-Facebook-data-center-network.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2015-07-UCX-An-Open-Source-Framework-for-HPC-Network-APIs-and-Beyond.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2015-08-%5Bsigcomm15%5D-Jupiter-Rising-A-Decade-of-Clos-Topologies-and-Centralized-Control-in-Googles-Datacenter-Network.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2016-06-%5Batc16%5D-Design-Guidelines-for-High-Performance-RDMA-Systems.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2019-10-%5BMICRO19%5D-NetDIMM-Low-Latency-Near-Memory-Network-Interface-Architecture.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2020-08-%5Bsc20%5D-An-In-Depth-Analysis-of-the-Slingshot-Interconnect.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2022-02-Doubling-all2all-Performance-with-NVIDIA-Collective-Communication-Library-212.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2022-07-%5Batc20%5D-Reexamining-Direct-Cache-Access-to-Optimize-IO-Intensive-Applications-for-Multi-hundred-gigabit-Networks.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2022-11-Improving-Network-Performance-of-HPC-Systems-Using-NVIDIA-Magnum-IO-NVSHMEM-and-GPUDirect-Async.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2023-07-Overview-of-and-Motivation-for-the-Forthcoming-Ultra-Ethernet-Consortium-Specification.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2023-07-Rail-only-A-Low-Cost-High-Performance-Network-for-Training-LLMs-with-Trillion-Parameters.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2024-04-%5Basplos24%5D-Scaling-Up-Memory-Disaggregated-Applications-with-Smart.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2024-11-%5Bmicro24%5D-Uncovering-Real-GPU-NoC-Characteristics-Implications-on-Interconnect-Architecture.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-01-%5Bnsdi25%5D-AutoCCL-Automated-Collective-Communication-Tuning-for-Accelerating-Distributed-and-Parallel-DNN-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-03-UB-Mesh-a-Hierarchically-Localized-nD-FullMesh-Datacenter-Network-Architecture.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-04-Introducing-UALink-200G-10-Specification.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-07-Demystifying-NCCL-An-In-depth-Analysis-of-GPU-Communications-Protocols-and-Algorithms.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-07-Scale-Up-Ethernet-Framework-Scale-Up-Ethernet-Framework-Specification.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-08-An-Extensible-Software-Transport-Layer-for-GPU-Networking.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-10-%5Bmicro25%5D-NetZIP-AlgorithmHardware-Co-design-of-In-network-Lossless-Compression-for-Distributed-Large-Model-Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-10-%5Bmicro25%5D-SkipReduce-Interconnection-Network-Sparsity-to-Accelerate-Distributed-Machine-Learning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-10-%5Bmicro25%5D-SuperMesh-Energy-Efficient-Collective-Communications-for-Accelerators.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-11-GPU-Initiated-Networking-for-NCCL.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/2025-12-Beluga-A-CXL-Based-Memory-Architecture-for-Scalable-and-Efficient-LLM-KVCache-Management.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2024-09-%5Bblog%5D-Memory-Efficiency-Faster-Initialization-and-Cost-Estimation-with-NVIDIA-Collective-Communications-Library-222.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2025-01-%5Bblog%5D-New-Scaling-Algorithm-and-Initialization-with-NVIDIA-Collective-Communications-Library-223.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2025-03-%5Bblog%5D-Networking-Reliability-and-Observability-at-Scale-with-NCCL-224.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2025-06-%5Bblog%5D-Improved-Performance-and-Monitoring-Capabilities-with-NVIDIA-Collective-Communications-Library-226.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2025-07-%5Bblog%5D-Enabling-Fast-Inference-and-Resilient-Training-with-NCCL-227.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2025-10-%5Bmicro25%5D-NetZIP-%20Algorithm%3AHardware%20Co-design%20of%20In-network%20LosslessCompression%20for%20Distributed%20Large%20Model%20Training.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2025-10-%5Bmicro25%5D-Optimizing%20All-to-All%20Collective%20Communication%20with%20Fault%20Tolerance%20on%20Torus%20Networks.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2025-10-%5Bmicro25%5D-SkipReduce-%20(Interconnection)%20Network%20Sparsity%20to%20AccelerateDistributed%20Machine%20Learning.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2025-10-%5Bmicro25%5D-SuperMesh-%20Energy-Efficient%20Collective%20Communications%20for%20Accelerators.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/networking/nccl/2025-11-%5Bblog%5D-Fusing-Communication-and-Compute-with-New-Device-API-and-Copy-Engine-Collectives-in-NVIDIA-NCCL-228.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/system/2025-01-%5Bosdi25%5D-Principles-and-Methodologies-for-Serial-Performance-Optimization.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/system/2025-06-Decomposing-Craft-An-Elementary-Grammar-for-Sharing-Expertise-in-Craft-Workflows.html</loc>
<lastmod>2026-04-15T17:29:04+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/papers/mlsys/system/2025-06-%5Bmlsys26%5D-LEANN-A-Low-Storage-Overhead-Vector-Index.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/inference/2025-03-%5Bgtc25%5D-FlashAttention-3-Fast-and-Accurate-Attention-with-Asynchrony-and-Low-precision.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/inference/2025-05-%5Baiday25-bj%5D-TensorRT-LLM-PyTorch-A-New-Development-Paradigm-for-High-Performance-LLM-Inference.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/inference/2025-05-%5Baiday25-bj%5D-TensorRT-LLM.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/inference/2025-05-%5Baiday25-bj%5D-TensorRT-LLM%E9%A9%B1%E5%8A%A8DeepSeek%E6%80%A7%E8%83%BD%E6%9E%81%E9%99%90-%E5%8D%8F%E5%90%8C%E8%85%BE%E8%AE%AF%E8%81%94%E5%90%88%E4%BC%98%E5%8C%96%E5%AE%9E%E8%B7%B5.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/inference/2025-11-%5Baiday25-hz%5D-A-Practical-Guide-to-Deploying-NVFP4-for-Efficient-Inference-on-Blackwell-GPUs.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/inference/2025-11-%5Baiday25-hz%5D-Best-practice-of-Blackwell-GPU-deployment-in-the-Chinese-market.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/inference/2025-11-%5Baiday25-hz%5D-Linear-Attention.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/inference/2025-11-%5Baiday25-hz%5D-TensorRT-LLM-Large-scale-Expert-Parallelism-Optimizations.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-03-%5Bgtc25%5D-Profiling-Large-Language-Model-Trainings-on-the-Grace-Hopper-Superchip-using-Nsight-Systems.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-05-%5Baiday25-bj%5D-FP8-Training-Recipes-Performance-and-Convergence.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-05-%5Baiday25-bj%5D-MCore-MoE-in-2025---DeepSeek-V3-and-Beyond.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-05-%5Baiday25-bj%5D-Megatron-Core-Custom-FSDP.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-11-%5Baiday25-hz%5D-Best-Practice-of-MLA-Kernel-Optimization-on-Blackwell.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-11-%5Baiday25-hz%5D-Best-Practices-of-Reinforcement-Learning-with-verl.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-11-%5Baiday25-hz%5D-CUDA-Profiling-and-Debugging-Tools-for-LLM.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-11-%5Baiday25-hz%5D-DeepSeek-V3-Pre-training-Optimization-on-Grace-Blackwell.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-11-%5Baiday25-hz%5D-Distributed-Implementation-of-Muon-and-Emerging-Optimizers-in-Megatron-Core.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-11-%5Baiday25-hz%5D-Hybrid-EP-An-Efficient-MoE-Communication-Implementation.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/llm/engineering/train/2025-11-%5Baiday25-hz%5D-Megatron-Core-MoE-Updates---2025-H2.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/cpu/2025-03-%5Bgtc25%5D-Application-Optimization-for-NVIDIA-Grace-CPU.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/framework/2022-03-%5Bgtc22%5D-TPAT-TensorRT-Plugin-Autogen-Tool.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/2025-11-%5Baiday25-hz%5D-DeepGEMM-20-Technical-Overview.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda-math/2023-03-%5Bgtc23%5D-Recent-Developments-in-NVIDIA-Math-Libraries.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda-math/2023-03-%5Bgtc23%5D-cuNumeric-and-Legate-How-to-Create-a-Distributed-GPU-Accelerated-Library.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda-math/2024-03-%5Bgtc24%5D-Deep-Dive-into-Math-Libraries.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2007-Optimizing-Parallel-Reduction-in-CUDA.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2018-03-PROGRAMMING-TENSOR-CORES-NATIVE-VOLTA-TENSOR-CORE-GEMM.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2021-04-%5Bgtc21%5D-ACCELERATING-CONVOLUTION-WITH-TENSOR-CORES-IN-CUTLASS.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2022-03-%5Bgtc22%5D-ACCELERATING-BACKWARD-DATA-GRADIENT-BY-INCREASING-TENSOR-CORE-UTILIZATION-IN-CUTLASS.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2022-03-%5Bgtc22%5D-AUTOMATED-PERFORMANCE-IMPROVEMENT-USING-CUDA-LINK-TIME-OPTIMIZATION.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2022-03-%5Bgtc22%5D-CUDA-New-Features-and-Beyond.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2022-03-%5Bgtc22%5D-HOW-CUDA-PROGRAMMING-WORKS.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2022-03-%5Bgtc22%5D-INSIDE-THE-NVIDIA-HOPPER-ARCHITECTURE.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2022-03-%5Bgtc22%5D-OPTIMIZING-CUDA-APPLICATIONS-FOR-NVIDIA-HOPPER-ARCHITECTURE.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2023-03-%5Bgtc23%5D-CUDA-Graphs-101.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2023-03-%5Bgtc23%5D-CUDA-New-Features-and-Beyond.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2023-03-%5Bgtc23%5D-How-To-Write-A-CUDA-Program.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2023-03-%5Bgtc23%5D-Increasing-Data-Center-Efficiency-by-Optimizing-GPU-Utilization-Session-ID-S51297.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2023-03-%5Bgtc23%5D-Optimizing-at-Scale-Investigating-Hidden-Bottlenecks-in-Multi-Node-Workloads.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2023-03-%5Bgtc23%5D-Programming-Model-and-Applications-for-Grace-Hopper-Superchip.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2023-03-%5Bgtc23%5D-Robust-and-Efficient-CUDA-C-Concurrency-with-Stream-Ordered-Allocation.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2024-03-%5Bgtc24%5D-Advanced-Performance-Optimization-in-CUDA-S62192.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2024-03-%5Bgtc24%5D-CUDA-New-Features-and-Beyond.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2024-03-%5Bgtc24%5D-Grace-Hopper-Superchip-Architecture-and-Performance-Optimizations-for-AI-Applications.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc23%5D-The-Performance-of-CUDA-with-the-Flexibility-of-PyTorch.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-1001-Ways-to-Write-CUDA-Kernels-in-Python.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-Accelerated-Python-The-Community-and-Ecosystem.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-Application-Optimization-for-NVIDIA-Grace-CPU.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-CUDA-New-Features-and-Beyond.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-CUDA-Techniques-to-Maximize-Compute-and-Instruction-Throughput-S72685.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-CUDA-Techniques-to-Maximize-Memory-Bandwidth-and-Hide-Latency-S72683.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-Get-the-most-performance-from-Grace-Hopper.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-How-To-Write-A-CUDA-Program-The-Parallel-Programming-Edition.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-How-You-Should-Write-a-CUDA-C-Kernel.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-How-to-Get-Data-Between-Storage-and-the-GPU-at-the-Speed-of-Light.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-Its-Easier-than-You-Think-Debugging-and-Optimizing-CUDA-with-Intelligent-Developer-Tools.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-Performance-Optimization-Tutorial-Part-3-S72686-CUDA-Techniques-to-Maximize-Concurrency-and-System-Utilization.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-THE-CUDA-C-DEVELOPERS-TOOLBOX.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-03-%5Bgtc25%5D-The-CUDA-Python-Developers-Toolbox.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-05-%5Baiday25-bj%5D-Balancing-the-Compute-Throughput-Latency-in-Async-Programming.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/2025-05-%5Baiday25-bj%5D-Optimizing-Memory-Bandwidth-and-Latency-on-Hopper-Blackwell.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/profile/2022-03-%5Bgtc22%5D-WHAT-WHERE-AND-WHY-USE-CUDA-DEVELOPER-TOOLS-TO-DETECT-LOCATE-AND-EXPLAIN-BUGS-AND-BOTTLENECKS.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/profile/2023-03-%5Bgtc23%5D-Become%20Faster%20in%20Writing%20Performant%20CUDA%20Kernels%20using%20the%20Source%20Page%20in%20Nsight%20Compute.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/profile/2023-03-%5Bgtc23%5D-Debugging-CUDA-An-Overview-of-CUDA-Correctness-Tools.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/profile/2023-03-%5Bgtc23%5D-S51205-From-the-Macro-to-the-Micro-CUDA-Developer-Tools-Find-and-Fix-Problems-at-Any-Scale.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/profile/2025-05-%5Bgtc25%5D-S72867-AI-Developer-Tools-for-Accelerated-Computing---Scarce-Data-Isnt-Scary.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cuda/profile/2025-11-%5Baiday25-hz%5D-CUDA-Profiling-and-Debugging-Tools-for-LLM.html</loc>
<lastmod>2026-04-15T17:29:05+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cutlass/2020-05-%5Bgtc21%5D-DEVELOPING-CUDA-KERNELS-TO-PUSH-TENSOR-CORES-TO-THE-ABSOLUTE-LIMIT-ON-NVIDIA-A100.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cutlass/2022-09-%5Bgtc22%5D-CUTLASS-Python-API-Enhancements-and-CUTLASS-30-Preview.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cutlass/2023-03-%5Bgtc23%5D-Developing-Optimal-CUDA-Kernels-on-Hopper-Tensor-Cores.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cutlass/2024-03-%5Bgtc24%5D-CUTLASS-A-Performant-Flexible-and-Portable-Way-to-Target-Hopper-Tensor-Cores.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cutlass/2025-03-%5Bgtc25%5D-Programming-Blackwell-Tensor-Cores-with-CuTe-and-CUTLASS.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cutlass/2025-03-%5Bgtc25%5D-USE-CUTLASS-TO-FUSE-MULTIPLE-GEMMS-TO-EXTREME-PERFORMANCE.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cutlass/2025-05-%5Baiday25-bj%5D-Enable-Tensor-Core-Programming-in-Python-with-CUTLASS-40.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/cutlass/2025-11-%5Baiday25-hz%5D-The-Evolution-and-Applications-of-CuTeDSL.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/hpc/2022-02-%5Bgtc22%5D-STANDARD-PARALLELISM.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/hpc/2022-03-%5Bgtc22%5D-WARP-A-HIGH-PERFORMANCE-PYTHON-FRAMEWORK-FOR-GPU-SIMULATION-AND-GRAPHICS.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/network/2022-03-%5Bgtc22%5D-FAST-INTER-GPU-COMMUNICATION-WITH-NCCL-FOR-DEEP-LEARNING-TRAINING-AND-MORE.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/network/2022-03-%5Bgtc22%5D-LATEST-ON-NVIDIA-MAGNUM-IO-GPUDIRECT-TECHNOLOGIES.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/network/2022-03-%5Bgtc22%5D-MULTI-GPU-PROGRAMMING-WITH-MPI.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/network/2023-03-%5Bgtc23%5D-Accelerating-data-movement-between-GPUs-and-storage-or-memory.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/network/2023-03-%5Bgtc23%5D-How-to-Streamline-Shared-Memory-Space-With-the-NVSHMEM-Communication-Library.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/network/2023-03-%5Bgtc23%5D-Scaling-Deep-Learning-Training-Fast-Inter-GPU-Communication-with-NCCL.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/network/2025-03-%5Bgtc23%5D-Become-Faster-in-Writing-Performant-CUDA-Kernels-using-the-Source-Page-in-Nsight-Compute.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/network/2025-03-%5Bgtc25%5D-Inter-GPU-Communication-Technology.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/gpu/triton/2025-03-%5Bgtc25%5D-Blackwell-Programming-for-the-Masses-With-OpenAI-Triton.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/networking/2018-RDMA-Tutorial.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/networking/2020-06-RDMA%20WITH%20GPU%20MEMORY%20VIA%20DMA-BUF.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/networking/2020-07-%5Batc20%5D-Reexamining-Direct-Cache-Access-to-Optimize-IO-Intensive-Applications-for-Multi-hundred-gigabit-Networks.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/mlsys/networking/RDMA-Aware-Networks-Programming-User-Manual.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/recsys/HSTU-attention-development-and-optimization-using-CutlassCuTe.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/recsys/RecSys-Example-HSTU-Model-Training-and-Inference-Best-Practice.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/robotics/Batching-Helpers-Optimizing-Loss-Computation.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/robotics/Bridge-the-Sim2Real-Gap-with-Neural-Actuator.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/robotics/IsaacSimLab-Benchmark.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
<url>
<loc>https://www.papercache.org/deepnotes-temp/slides/robotics/Video-Training-for-Assistance-Driving.html</loc>
<lastmod>2026-04-15T17:29:06+00:00</lastmod>
</url>
</urlset>
