static struct memory_locality *localities_types[4];
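+/*
+ * One memory-side cache described by HMAT for a memory target. A target
+ * may front multiple cache levels, so each parsed entry is kept on the
+ * target's caches list and only registered with the node subsystem once
+ * the node is known to be online.
+ */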
+struct target_cache {
+ struct list_head node;
+ struct node_cache_attrs cache_attrs;
+};
+
struct memory_target {
struct list_head node;
unsigned int memory_pxm;
unsigned int processor_pxm;
struct node_hmem_attrs hmem_attrs;
+ struct list_head caches;
};
struct memory_initiator {
target->memory_pxm = mem_pxm;
target->processor_pxm = PXM_INVAL;
list_add_tail(&target->node, &targets);
+ INIT_LIST_HEAD(&target->caches);
}
static __init const char *hmat_data_type(u8 type)
const unsigned long end)
{
struct acpi_hmat_cache *cache = (void *)header;
- struct node_cache_attrs cache_attrs;
+ struct memory_target *target;
+ struct target_cache *tcache;
u32 attrs;
if (cache->header.length < sizeof(*cache)) {
cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles);
- cache_attrs.size = cache->cache_size;
- cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
- cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
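+ /*
+ * A cache entry may name a memory proximity domain for which no target
+ * was registered; skip it rather than fail the rest of the parse.
+ */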
+ target = find_mem_target(cache->memory_PD);
+ if (!target)
+ return 0;
+
+ tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
+ if (!tcache) {
+ pr_notice_once("Failed to allocate HMAT cache info\n");
+ return 0;
+ }
+
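+ /*
+ * Decode the cache attributes DWORD: the cache level sits in bits 7:4,
+ * associativity in bits 11:8, write policy in bits 15:12 and the line
+ * size in bits 31:16, matching the ACPI_HMAT_* masks used below.
+ */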
+ tcache->cache_attrs.size = cache->cache_size;
+ tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
+ tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
case ACPI_HMAT_CA_DIRECT_MAPPED:
- cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+ tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
break;
case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
- cache_attrs.indexing = NODE_CACHE_INDEXED;
+ tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
break;
case ACPI_HMAT_CA_NONE:
default:
- cache_attrs.indexing = NODE_CACHE_OTHER;
+ tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
break;
}
switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
case ACPI_HMAT_CP_WB:
- cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
+ tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
break;
case ACPI_HMAT_CP_WT:
- cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
+ tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
break;
case ACPI_HMAT_CP_NONE:
default:
- cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
+ tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
break;
}
- node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
+ list_add_tail(&tcache->node, &target->caches);
return 0;
}
}
}
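+/*
+ * Register every cache level parsed for this target; node_add_cache()
+ * exposes the attributes through the node's memory_side_cache sysfs
+ * entries.
+ */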
+static __init void hmat_register_target_cache(struct memory_target *target)
+{
+ unsigned mem_nid = pxm_to_node(target->memory_pxm);
+ struct target_cache *tcache;
+
+ list_for_each_entry(tcache, &target->caches, node)
+ node_add_cache(mem_nid, &tcache->cache_attrs);
+}
+
static __init void hmat_register_target_perf(struct memory_target *target)
{
unsigned mem_nid = pxm_to_node(target->memory_pxm);
node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}
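+/*
+ * HMAT may describe targets whose memory is not yet online; register
+ * initiators, caches and performance attributes only for online nodes.
+ */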
+static __init void hmat_register_target(struct memory_target *target)
+{
+ if (!node_online(pxm_to_node(target->memory_pxm)))
+ return;
+
+ hmat_register_target_initiators(target);
+ hmat_register_target_cache(target);
+ hmat_register_target_perf(target);
+}
+
static __init void hmat_register_targets(void)
{
struct memory_target *target;
- list_for_each_entry(target, &targets, node) {
- hmat_register_target_initiators(target);
- hmat_register_target_perf(target);
- }
+ list_for_each_entry(target, &targets, node)
+ hmat_register_target(target);
}
static __init void hmat_free_structures(void)
struct memory_target *target, *tnext;
struct memory_locality *loc, *lnext;
struct memory_initiator *initiator, *inext;
+ struct target_cache *tcache, *cnext;
list_for_each_entry_safe(target, tnext, &targets, node) {
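+ /* Free the per-target cache entries before the target itself. */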
+ list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
+ list_del(&tcache->node);
+ kfree(tcache);
+ }
list_del(&target->node);
kfree(target);
}