}
static ssize_t osc_checksum_type_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
int i;
read_cum += r;
write_cum += w;
seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- 1 << i, r, pct(r, read_tot),
- pct(read_cum, read_tot), w,
- pct(w, write_tot),
- pct(write_cum, write_tot));
+ 1 << i, r, pct(r, read_tot),
+ pct(read_cum, read_tot), w,
+ pct(w, write_tot),
+ pct(write_cum, write_tot));
if (read_cum == read_tot && write_cum == write_tot)
break;
}
read_cum += r;
write_cum += w;
seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- i, r, pct(r, read_tot),
- pct(read_cum, read_tot), w,
- pct(w, write_tot),
- pct(write_cum, write_tot));
+ i, r, pct(r, read_tot),
+ pct(read_cum, read_tot), w,
+ pct(w, write_tot),
+ pct(write_cum, write_tot));
if (read_cum == read_tot && write_cum == write_tot)
break;
}
if (ext->oe_urgent)
list_move_tail(&ext->oe_link,
- &obj->oo_urgent_exts);
+ &obj->oo_urgent_exts);
}
osc_object_unlock(obj);
ext->oe_rc = rc ?: ext->oe_nr_pages;
EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
- oap_pending_item) {
+ list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
list_del_init(&oap->oap_rpc_item);
list_del_init(&oap->oap_pending_item);
if (last_off <= oap->oap_obj_off) {
goto out;
/* discard all pages with index greater than trunc_index */
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
- oap_pending_item) {
+ list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
struct cl_page *sub = oap2cl_page(oap);
struct cl_page *page = cl_page_top(sub);
LASSERT(osc_object_is_locked(obj));
while (!list_empty(&obj->oo_hp_exts)) {
ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
- oe_link);
+ oe_link);
LASSERT(ext->oe_state == OES_CACHE);
if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
&max_pages))
while (!list_empty(&obj->oo_urgent_exts)) {
ext = list_entry(obj->oo_urgent_exts.next,
- struct osc_extent, oe_link);
+ struct osc_extent, oe_link);
if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
&max_pages))
return page_count;
int rc = 0;
LASSERT(osc_object_is_locked(osc));
- list_for_each_entry_safe(ext, next,
- &osc->oo_reading_exts, oe_link) {
+ list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) {
EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count,
&max_pages))
ext->oe_max_end <= end, ext);
osc_extent_state_set(ext, OES_LOCKING);
ext->oe_owner = current;
- list_move_tail(&ext->oe_link,
- &discard_list);
+ list_move_tail(&ext->oe_link, &discard_list);
osc_update_pending(obj, OBD_BRW_WRITE,
-ext->oe_nr_pages);
}
if (oap->oap_cmd & OBD_BRW_WRITE &&
!list_empty(&oap->oap_pending_item))
CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
- start, current->comm);
+ start, current->comm);
{
struct page *vmpage = cl_page_vmpage(env, page);
struct cl_lock *scan;
head = cl_object_header(apage->cp_obj);
- list_for_each_entry(scan, &head->coh_locks,
- cll_linkage)
+ list_for_each_entry(scan, &head->coh_locks, cll_linkage)
CL_LOCK_DEBUG(D_ERROR, env, scan,
"no cover page!\n");
CL_PAGE_DEBUG(D_ERROR, env, apage,
"Impossible state: %d\n", ols->ols_state);
LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
- "lock = %p, ols = %p\n", lock, ols);
+ "lock = %p, ols = %p\n", lock, ols);
result = osc_lock_enqueue_wait(env, ols);
if (result == 0) {
ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
osc_lock_build_policy(env, lock, policy);
result = osc_enqueue_base(osc_export(obj), resname,
- &ols->ols_flags, policy,
- &ols->ols_lvb,
- obj->oo_oinfo->loi_kms_valid,
- osc_lock_upcall,
- ols, einfo, &ols->ols_handle,
- PTLRPCD_SET, 1, ols->ols_agl);
+ &ols->ols_flags, policy,
+ &ols->ols_lvb,
+ obj->oo_oinfo->loi_kms_valid,
+ osc_lock_upcall,
+ ols, einfo, &ols->ols_handle,
+ PTLRPCD_SET, 1, ols->ols_agl);
if (result != 0) {
cl_lock_user_del(env, lock);
cl_lock_unhold(env, lock, "upcall", lock);
if (descr->cld_mode >= CLM_WRITE) {
result = osc_cache_writeback_range(env, obj,
- descr->cld_start, descr->cld_end,
- 1, discard);
+ descr->cld_start,
+ descr->cld_end,
+ 1, discard);
LDLM_DEBUG(ols->ols_lock,
- "lock %p: %d pages were %s.\n", lock, result,
- discard ? "discarded" : "written");
+ "lock %p: %d pages were %s.\n", lock, result,
+ discard ? "discarded" : "written");
if (result > 0)
result = 0;
}
clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
- lock, clk, clk->ols_flags);
+ lock, clk, clk->ols_flags);
result = 0;
} else
*/
if (olock &&
atomic_add_return(_PAGEREF_MAGIC,
- &olock->ols_pageref) != _PAGEREF_MAGIC) {
+ &olock->ols_pageref) != _PAGEREF_MAGIC) {
atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
rc = 1;
}
};
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct osc_object *osc = cl2osc(obj);
struct osc_page *opg = cl_object_page_slice(obj, page);
opg->ops_to = PAGE_CACHE_SIZE;
result = osc_prep_async_page(osc, opg, vmpage,
- cl_offset(obj, page->cp_index));
+ cl_offset(obj, page->cp_index));
if (result == 0) {
struct osc_io *oio = osc_env_io(env);
opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj,
- &osc_page_ops);
+ cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops);
}
/*
* Cannot assert osc_page_protected() here as read-ahead
oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
if (!client_is_remote(osc_export(obj)) &&
- capable(CFS_CAP_SYS_RESOURCE)) {
+ capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
oap->oap_cmd |= OBD_BRW_NOQUOTA;
}
break;
opg = list_entry(cli->cl_lru_list.next, struct osc_page,
- ops_lru);
+ ops_lru);
page = cl_page_top(opg->ops_cl.cpl_page);
if (cl_page_in_use_noref(page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
rc = osc_lru_shrink(cli, lru_shrink_min);
if (rc != 0) {
CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
- cli->cl_import->imp_obd->obd_name, rc, cli);
+ cli->cl_import->imp_obd->obd_name, rc, cli);
return rc;
}
CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
- cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ cli->cl_import->imp_obd->obd_name, cli,
+ atomic_read(&cli->cl_lru_in_list),
+ atomic_read(&cli->cl_lru_busy));
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen.
max_scans = atomic_read(&cache->ccc_users);
while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
cli = list_entry(cache->ccc_lru.next, struct client_obd,
- cl_lru_osc);
+ cl_lru_osc);
CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
- cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ cli->cl_import->imp_obd->obd_name, cli,
+ atomic_read(&cli->cl_lru_in_list),
+ atomic_read(&cli->cl_lru_busy));
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
if (atomic_read(&cli->cl_lru_in_list) > 0) {
spin_unlock(&cache->ccc_lru_lock);
CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
- cli->cl_import->imp_obd->obd_name, cli, rc);
+ cli->cl_import->imp_obd->obd_name, cli, rc);
return rc;
}
gen = atomic_read(&cli->cl_lru_in_list);
rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0 ||
- (atomic_read(&cli->cl_lru_in_list) > 0 &&
- gen != atomic_read(&cli->cl_lru_in_list)),
- &lwi);
+ atomic_read(cli->cl_lru_left) > 0 ||
+ (atomic_read(&cli->cl_lru_in_list) > 0 &&
+ gen != atomic_read(&cli->cl_lru_in_list)),
+ &lwi);
atomic_dec(&osc_lru_waiters);
if (rc < 0)
{
struct client_obd *client;
- list_for_each_entry(client, &item->ti_obd_list,
- cl_grant_shrink_list) {
+ list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
if (osc_should_shrink_grant(client))
osc_shrink_grant(client);
}
&client->cl_grant_shrink_list);
if (rc) {
CERROR("add grant client %s error %d\n",
- client->cl_import->imp_obd->obd_name, rc);
+ client->cl_import->imp_obd->obd_name, rc);
return rc;
}
CDEBUG(D_CACHE, "add grant client %s\n",
if (remote_rcs[i] != 0) {
CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
- i, remote_rcs[i], req);
+ i, remote_rcs[i], req);
return -EPROTO;
}
}
kunmap(pga[i]->pg);
}
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
- pga[i]->off & ~CFS_PAGE_MASK,
+ pga[i]->off & ~CFS_PAGE_MASK,
count);
CDEBUG(D_PAGE,
"page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
if (rc != req->rq_bulk->bd_nob_transferred) {
CERROR("Unexpected rc %d (%d transferred)\n",
- rc, req->rq_bulk->bd_nob_transferred);
+ rc, req->rq_bulk->bd_nob_transferred);
return -EPROTO;
}
sort_brw_pages(pga, page_count);
rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
- pga, &req, 1, 0);
+ pga, &req, 1, 0);
if (rc != 0) {
CERROR("prep_req failed: %d\n", rc);
goto out;
tmp = oap;
if (oap->oap_interrupted && !req->rq_intr) {
CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
- oap, req);
+ oap, req);
ptlrpc_mark_interrupted(req);
}
}
*/
while (!list_empty(ext_list)) {
ext = list_entry(ext_list->next, struct osc_extent,
- oe_link);
+ oe_link);
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 0, rc);
}