u32 return_target, return_dwords;
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
+ unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
+ bool need_flush = gpu->flush_seq != new_flush_seq;
lockdep_assert_held(&gpu->lock);
/*
 * If we need to flush the MMU prior to the commands, or to switch the
 * execution context, we need to append a mmu flush load state, followed
 * by a new link to this buffer - a total of four additional words.
 */
- if (gpu->mmu->need_flush || switch_context) {
+ if (need_flush || switch_context) {
u32 target, extra_dwords;
/* link command */
extra_dwords = 1;
/* flush command */
- if (gpu->mmu->need_flush) {
+ if (need_flush) {
if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
extra_dwords += 1;
else
extra_dwords += 3;
}
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
- if (gpu->mmu->need_flush) {
+ if (need_flush) {
/* Add the MMU flush */
if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
}
- gpu->mmu->need_flush = false;
+ gpu->flush_seq = new_flush_seq;
}
if (switch_context) {
etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
gpu->exec_state = exec_state;
}
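A note on the submit-side logic above: the counter is sampled once with READ_ONCE() into new_flush_seq, compared against the value this GPU last flushed for, and gpu->flush_seq is updated only after the flush commands have been written into the ring, so a stale TLB is never marked clean. Only equality is ever tested, so wrap-around of the unsigned counter is harmless, and an increment that races with the READ_ONCE() is simply picked up by the next submit - which is sufficient as long as any mapping a job references is created before that job is queued. The MMU-side hunks below are the producer: every map or unmap advances mmu->flush_seq while holding mmu->lock.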
list_add_tail(&mapping->mmu_node, &mmu->mappings);
- mmu->need_flush = true;
+ mmu->flush_seq++;
unlock:
mutex_unlock(&mmu->lock);
etnaviv_iommu_remove_mapping(mmu, mapping);
list_del(&mapping->mmu_node);
- mmu->need_flush = true;
+ mmu->flush_seq++;
mutex_unlock(&mmu->lock);
}
return ret;
}
- mmu->need_flush = true;
+ mmu->flush_seq++;
}
list_add_tail(&mapping->mmu_node, &mmu->mappings);
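Taken together, the two halves form a simple flush-sequence handshake: every mapping change bumps mmu->flush_seq, and each GPU latches the last value it emitted a TLB flush for. The standalone sketch below (plain userspace C; the demo_mmu/demo_gpu types and demo_* functions are invented for illustration, and it omits locking and READ_ONCE(), so it shows only the counter logic, not the driver's synchronization) illustrates why a per-GPU latch works where the old shared need_flush flag would not once several GPUs can share one set of page tables.

/*
 * flush_seq_demo.c - standalone sketch of the flush-sequence idea.
 * Build with: cc -Wall flush_seq_demo.c
 */
#include <stdio.h>

struct demo_mmu {
	unsigned int flush_seq;		/* bumped on every map/unmap */
};

struct demo_gpu {
	const char *name;
	struct demo_mmu *mmu;		/* shared address space */
	unsigned int flush_seq;		/* last value this GPU flushed for */
};

/* producer side: any mapping change invalidates all GPUs' TLBs */
static void demo_mappings_changed(struct demo_mmu *mmu)
{
	mmu->flush_seq++;
}

/* consumer side: flush only if this GPU has not caught up yet */
static void demo_submit(struct demo_gpu *gpu)
{
	unsigned int new_flush_seq = gpu->mmu->flush_seq;
	int need_flush = gpu->flush_seq != new_flush_seq;

	if (need_flush) {
		printf("%s: emit TLB flush\n", gpu->name);
		gpu->flush_seq = new_flush_seq;	/* latch the value we acted on */
	} else {
		printf("%s: TLB already in sync\n", gpu->name);
	}
}

int main(void)
{
	struct demo_mmu mmu = { 0 };
	struct demo_gpu gpu0 = { "gpu0", &mmu, 0 };
	struct demo_gpu gpu1 = { "gpu1", &mmu, 0 };

	demo_mappings_changed(&mmu);	/* e.g. a map_gem call */
	demo_submit(&gpu0);		/* flushes */
	demo_submit(&gpu1);		/* still flushes - no shared flag was cleared */
	demo_submit(&gpu0);		/* in sync, no flush */
	return 0;
}

With the old boolean, the first submit would have cleared gpu->mmu->need_flush on the shared object and the second GPU would have skipped a flush it still needed; with the sequence number each GPU compares against its own latched value, so the "mappings changed" information is never consumed on behalf of another GPU.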